content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common get_config new_operation new_request send_request
#' @include redshift_service.R
NULL
#' Exchanges a DC1 Reserved Node for a DC2 Reserved Node with no changes to
#' the configuration (term, payment type, or number of nodes) and no
#' additional costs
#'
#' @description
#' Exchanges a DC1 Reserved Node for a DC2 Reserved Node with no changes to
#' the configuration (term, payment type, or number of nodes) and no
#' additional costs.
#'
#' @usage
#' redshift_accept_reserved_node_exchange(ReservedNodeId,
#' TargetReservedNodeOfferingId)
#'
#' @param ReservedNodeId [required] A string representing the node identifier of the DC1 Reserved Node to be
#' exchanged.
#' @param TargetReservedNodeOfferingId [required] The unique identifier of the DC2 Reserved Node offering to be used for
#' the exchange. You can obtain the value for the parameter by calling
#' [`get_reserved_node_exchange_offerings`][redshift_get_reserved_node_exchange_offerings]
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ExchangedReservedNode = list(
#' ReservedNodeId = "string",
#' ReservedNodeOfferingId = "string",
#' NodeType = "string",
#' StartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Duration = 123,
#' FixedPrice = 123.0,
#' UsagePrice = 123.0,
#' CurrencyCode = "string",
#' NodeCount = 123,
#' State = "string",
#' OfferingType = "string",
#' RecurringCharges = list(
#' list(
#' RecurringChargeAmount = 123.0,
#' RecurringChargeFrequency = "string"
#' )
#' ),
#' ReservedNodeOfferingType = "Regular"|"Upgradable"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$accept_reserved_node_exchange(
#' ReservedNodeId = "string",
#' TargetReservedNodeOfferingId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_accept_reserved_node_exchange
redshift_accept_reserved_node_exchange <- function(ReservedNodeId, TargetReservedNodeOfferingId) {
  # Describe the AcceptReservedNodeExchange API call (query-style POST to "/").
  operation <- new_operation(
    name = "AcceptReservedNodeExchange",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the request parameters and prepare the output shape.
  op_input <- .redshift$accept_reserved_node_exchange_input(
    ReservedNodeId = ReservedNodeId,
    TargetReservedNodeOfferingId = TargetReservedNodeOfferingId
  )
  op_output <- .redshift$accept_reserved_node_exchange_output()
  # Build a client from the ambient configuration, then issue the request.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.redshift$operations$accept_reserved_node_exchange <- redshift_accept_reserved_node_exchange
#' Adds an inbound (ingress) rule to an Amazon Redshift security group
#'
#' @description
#' Adds an inbound (ingress) rule to an Amazon Redshift security group.
#' Depending on whether the application accessing your cluster is running
#' on the Internet or an Amazon EC2 instance, you can authorize inbound
#' access to either a Classless Interdomain Routing (CIDR)/Internet
#' Protocol (IP) range or to an Amazon EC2 security group. You can add as
#' many as 20 ingress rules to an Amazon Redshift security group.
#'
#' If you authorize access to an Amazon EC2 security group, specify
#' *EC2SecurityGroupName* and *EC2SecurityGroupOwnerId*. The Amazon EC2
#' security group and Amazon Redshift cluster must be in the same AWS
#' Region.
#'
#' If you authorize access to a CIDR/IP address range, specify *CIDRIP*.
#' For an overview of CIDR blocks, see the Wikipedia article on [Classless
#' Inter-Domain
#' Routing](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing).
#'
#' You must also associate the security group with a cluster so that
#' clients running on these IP addresses or the EC2 instance are authorized
#' to connect to the cluster. For information about managing security
#' groups, go to [Working with Security
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_authorize_cluster_security_group_ingress(
#' ClusterSecurityGroupName, CIDRIP, EC2SecurityGroupName,
#' EC2SecurityGroupOwnerId)
#'
#' @param ClusterSecurityGroupName [required] The name of the security group to which the ingress rule is added.
#' @param CIDRIP The IP range to be added the Amazon Redshift security group.
#' @param EC2SecurityGroupName The EC2 security group to be added the Amazon Redshift security group.
#' @param EC2SecurityGroupOwnerId The AWS account number of the owner of the security group specified by
#' the *EC2SecurityGroupName* parameter. The AWS Access Key ID is not an
#' acceptable value.
#'
#' Example: `111122223333`
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ClusterSecurityGroup = list(
#' ClusterSecurityGroupName = "string",
#' Description = "string",
#' EC2SecurityGroups = list(
#' list(
#' Status = "string",
#' EC2SecurityGroupName = "string",
#' EC2SecurityGroupOwnerId = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ),
#' IPRanges = list(
#' list(
#' Status = "string",
#' CIDRIP = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ),
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$authorize_cluster_security_group_ingress(
#' ClusterSecurityGroupName = "string",
#' CIDRIP = "string",
#' EC2SecurityGroupName = "string",
#' EC2SecurityGroupOwnerId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_authorize_cluster_security_group_ingress
redshift_authorize_cluster_security_group_ingress <- function(ClusterSecurityGroupName, CIDRIP = NULL, EC2SecurityGroupName = NULL, EC2SecurityGroupOwnerId = NULL) {
  # Describe the AuthorizeClusterSecurityGroupIngress API call.
  operation <- new_operation(
    name = "AuthorizeClusterSecurityGroupIngress",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the request parameters (optional ones pass through as NULL)
  # and prepare the output shape.
  op_input <- .redshift$authorize_cluster_security_group_ingress_input(
    ClusterSecurityGroupName = ClusterSecurityGroupName,
    CIDRIP = CIDRIP,
    EC2SecurityGroupName = EC2SecurityGroupName,
    EC2SecurityGroupOwnerId = EC2SecurityGroupOwnerId
  )
  op_output <- .redshift$authorize_cluster_security_group_ingress_output()
  # Build a client from the ambient configuration, then issue the request.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.redshift$operations$authorize_cluster_security_group_ingress <- redshift_authorize_cluster_security_group_ingress
#' Authorizes the specified AWS customer account to restore the specified
#' snapshot
#'
#' @description
#' Authorizes the specified AWS customer account to restore the specified
#' snapshot.
#'
#' For more information about working with snapshots, go to [Amazon
#' Redshift
#' Snapshots](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_authorize_snapshot_access(SnapshotIdentifier,
#' SnapshotClusterIdentifier, AccountWithRestoreAccess)
#'
#' @param SnapshotIdentifier [required] The identifier of the snapshot the account is authorized to restore.
#' @param SnapshotClusterIdentifier The identifier of the cluster the snapshot was created from. This
#' parameter is required if your IAM user has a policy containing a
#' snapshot resource element that specifies anything other than * for the
#' cluster name.
#' @param AccountWithRestoreAccess [required] The identifier of the AWS customer account authorized to restore the
#' specified snapshot.
#'
#' To share a snapshot with AWS support, specify amazon-redshift-support.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Snapshot = list(
#' SnapshotIdentifier = "string",
#' ClusterIdentifier = "string",
#' SnapshotCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Status = "string",
#' Port = 123,
#' AvailabilityZone = "string",
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' MasterUsername = "string",
#' ClusterVersion = "string",
#' EngineFullVersion = "string",
#' SnapshotType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' DBName = "string",
#' VpcId = "string",
#' Encrypted = TRUE|FALSE,
#' KmsKeyId = "string",
#' EncryptedWithHSM = TRUE|FALSE,
#' AccountsWithRestoreAccess = list(
#' list(
#' AccountId = "string",
#' AccountAlias = "string"
#' )
#' ),
#' OwnerAccount = "string",
#' TotalBackupSizeInMegaBytes = 123.0,
#' ActualIncrementalBackupSizeInMegaBytes = 123.0,
#' BackupProgressInMegaBytes = 123.0,
#' CurrentBackupRateInMegaBytesPerSecond = 123.0,
#' EstimatedSecondsToCompletion = 123,
#' ElapsedTimeInSeconds = 123,
#' SourceRegion = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' RestorableNodeTypes = list(
#' "string"
#' ),
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' ManualSnapshotRetentionPeriod = 123,
#' ManualSnapshotRemainingDays = 123,
#' SnapshotRetentionStartTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$authorize_snapshot_access(
#' SnapshotIdentifier = "string",
#' SnapshotClusterIdentifier = "string",
#' AccountWithRestoreAccess = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_authorize_snapshot_access
redshift_authorize_snapshot_access <- function(SnapshotIdentifier, SnapshotClusterIdentifier = NULL, AccountWithRestoreAccess) {
  # Describe the AuthorizeSnapshotAccess API call.
  # NOTE: the required AccountWithRestoreAccess follows an optional argument;
  # this mirrors the generated AWS API signature and relies on R's lazy
  # evaluation, so callers typically pass it by name.
  operation <- new_operation(
    name = "AuthorizeSnapshotAccess",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the request parameters and prepare the output shape.
  op_input <- .redshift$authorize_snapshot_access_input(
    SnapshotIdentifier = SnapshotIdentifier,
    SnapshotClusterIdentifier = SnapshotClusterIdentifier,
    AccountWithRestoreAccess = AccountWithRestoreAccess
  )
  op_output <- .redshift$authorize_snapshot_access_output()
  # Build a client from the ambient configuration, then issue the request.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.redshift$operations$authorize_snapshot_access <- redshift_authorize_snapshot_access
#' Deletes a set of cluster snapshots
#'
#' @description
#' Deletes a set of cluster snapshots.
#'
#' @usage
#' redshift_batch_delete_cluster_snapshots(Identifiers)
#'
#' @param Identifiers [required] A list of identifiers for the snapshots that you want to delete.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Resources = list(
#' "string"
#' ),
#' Errors = list(
#' list(
#' SnapshotIdentifier = "string",
#' SnapshotClusterIdentifier = "string",
#' FailureCode = "string",
#' FailureReason = "string"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$batch_delete_cluster_snapshots(
#' Identifiers = list(
#' list(
#' SnapshotIdentifier = "string",
#' SnapshotClusterIdentifier = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_batch_delete_cluster_snapshots
redshift_batch_delete_cluster_snapshots <- function(Identifiers) {
  # Describe the BatchDeleteClusterSnapshots API call.
  operation <- new_operation(
    name = "BatchDeleteClusterSnapshots",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the snapshot identifier list and prepare the output shape.
  op_input <- .redshift$batch_delete_cluster_snapshots_input(Identifiers = Identifiers)
  op_output <- .redshift$batch_delete_cluster_snapshots_output()
  # Build a client from the ambient configuration, then issue the request.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.redshift$operations$batch_delete_cluster_snapshots <- redshift_batch_delete_cluster_snapshots
#' Modifies the settings for a set of cluster snapshots
#'
#' @description
#' Modifies the settings for a set of cluster snapshots.
#'
#' @usage
#' redshift_batch_modify_cluster_snapshots(SnapshotIdentifierList,
#' ManualSnapshotRetentionPeriod, Force)
#'
#' @param SnapshotIdentifierList [required] A list of snapshot identifiers you want to modify.
#' @param ManualSnapshotRetentionPeriod The number of days that a manual snapshot is retained. If you specify
#' the value -1, the manual snapshot is retained indefinitely.
#'
#' The number must be either -1 or an integer between 1 and 3,653.
#'
#' If you decrease the manual snapshot retention period from its current
#' value, existing manual snapshots that fall outside of the new retention
#' period will return an error. If you want to suppress the errors and
#' delete the snapshots, use the force option.
#' @param Force A boolean value indicating whether to override an exception if the
#' retention period has passed.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Resources = list(
#' "string"
#' ),
#' Errors = list(
#' list(
#' SnapshotIdentifier = "string",
#' SnapshotClusterIdentifier = "string",
#' FailureCode = "string",
#' FailureReason = "string"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$batch_modify_cluster_snapshots(
#' SnapshotIdentifierList = list(
#' "string"
#' ),
#' ManualSnapshotRetentionPeriod = 123,
#' Force = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_batch_modify_cluster_snapshots
redshift_batch_modify_cluster_snapshots <- function(SnapshotIdentifierList, ManualSnapshotRetentionPeriod = NULL, Force = NULL) {
  # Describe the BatchModifyClusterSnapshots API call.
  operation <- new_operation(
    name = "BatchModifyClusterSnapshots",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the request parameters (optional ones pass through as NULL)
  # and prepare the output shape.
  op_input <- .redshift$batch_modify_cluster_snapshots_input(
    SnapshotIdentifierList = SnapshotIdentifierList,
    ManualSnapshotRetentionPeriod = ManualSnapshotRetentionPeriod,
    Force = Force
  )
  op_output <- .redshift$batch_modify_cluster_snapshots_output()
  # Build a client from the ambient configuration, then issue the request.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.redshift$operations$batch_modify_cluster_snapshots <- redshift_batch_modify_cluster_snapshots
#' Cancels a resize operation for a cluster
#'
#' @description
#' Cancels a resize operation for a cluster.
#'
#' @usage
#' redshift_cancel_resize(ClusterIdentifier)
#'
#' @param ClusterIdentifier [required] The unique identifier for the cluster that you want to cancel a resize
#' operation for.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' TargetNodeType = "string",
#' TargetNumberOfNodes = 123,
#' TargetClusterType = "string",
#' Status = "string",
#' ImportTablesCompleted = list(
#' "string"
#' ),
#' ImportTablesInProgress = list(
#' "string"
#' ),
#' ImportTablesNotStarted = list(
#' "string"
#' ),
#' AvgResizeRateInMegaBytesPerSecond = 123.0,
#' TotalResizeDataInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ResizeType = "string",
#' Message = "string",
#' TargetEncryptionType = "string",
#' DataTransferProgressPercent = 123.0
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$cancel_resize(
#' ClusterIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_cancel_resize
redshift_cancel_resize <- function(ClusterIdentifier) {
  # Describe the CancelResize API call.
  operation <- new_operation(
    name = "CancelResize",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the cluster identifier and prepare the output shape.
  op_input <- .redshift$cancel_resize_input(ClusterIdentifier = ClusterIdentifier)
  op_output <- .redshift$cancel_resize_output()
  # Build a client from the ambient configuration, then issue the request.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.redshift$operations$cancel_resize <- redshift_cancel_resize
#' Copies the specified automated cluster snapshot to a new manual cluster
#' snapshot
#'
#' @description
#' Copies the specified automated cluster snapshot to a new manual cluster
#' snapshot. The source must be an automated snapshot and it must be in the
#' available state.
#'
#' When you delete a cluster, Amazon Redshift deletes any automated
#' snapshots of the cluster. Also, when the retention period of the
#' snapshot expires, Amazon Redshift automatically deletes it. If you want
#' to keep an automated snapshot for a longer period, you can make a manual
#' copy of the snapshot. Manual snapshots are retained until you delete
#' them.
#'
#' For more information about working with snapshots, go to [Amazon
#' Redshift
#' Snapshots](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_copy_cluster_snapshot(SourceSnapshotIdentifier,
#' SourceSnapshotClusterIdentifier, TargetSnapshotIdentifier,
#' ManualSnapshotRetentionPeriod)
#'
#' @param SourceSnapshotIdentifier [required] The identifier for the source snapshot.
#'
#' Constraints:
#'
#' - Must be the identifier for a valid automated snapshot whose state is
#' `available`.
#' @param SourceSnapshotClusterIdentifier The identifier of the cluster the source snapshot was created from. This
#' parameter is required if your IAM user has a policy containing a
#' snapshot resource element that specifies anything other than * for the
#' cluster name.
#'
#' Constraints:
#'
#' - Must be the identifier for a valid cluster.
#' @param TargetSnapshotIdentifier [required] The identifier given to the new manual snapshot.
#'
#' Constraints:
#'
#' - Cannot be null, empty, or blank.
#'
#' - Must contain from 1 to 255 alphanumeric characters or hyphens.
#'
#' - First character must be a letter.
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens.
#'
#' - Must be unique for the AWS account that is making the request.
#' @param ManualSnapshotRetentionPeriod The number of days that a manual snapshot is retained. If the value is
#' -1, the manual snapshot is retained indefinitely.
#'
#' The value must be either -1 or an integer between 1 and 3,653.
#'
#' The default value is -1.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Snapshot = list(
#' SnapshotIdentifier = "string",
#' ClusterIdentifier = "string",
#' SnapshotCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Status = "string",
#' Port = 123,
#' AvailabilityZone = "string",
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' MasterUsername = "string",
#' ClusterVersion = "string",
#' EngineFullVersion = "string",
#' SnapshotType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' DBName = "string",
#' VpcId = "string",
#' Encrypted = TRUE|FALSE,
#' KmsKeyId = "string",
#' EncryptedWithHSM = TRUE|FALSE,
#' AccountsWithRestoreAccess = list(
#' list(
#' AccountId = "string",
#' AccountAlias = "string"
#' )
#' ),
#' OwnerAccount = "string",
#' TotalBackupSizeInMegaBytes = 123.0,
#' ActualIncrementalBackupSizeInMegaBytes = 123.0,
#' BackupProgressInMegaBytes = 123.0,
#' CurrentBackupRateInMegaBytesPerSecond = 123.0,
#' EstimatedSecondsToCompletion = 123,
#' ElapsedTimeInSeconds = 123,
#' SourceRegion = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' RestorableNodeTypes = list(
#' "string"
#' ),
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' ManualSnapshotRetentionPeriod = 123,
#' ManualSnapshotRemainingDays = 123,
#' SnapshotRetentionStartTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$copy_cluster_snapshot(
#' SourceSnapshotIdentifier = "string",
#' SourceSnapshotClusterIdentifier = "string",
#' TargetSnapshotIdentifier = "string",
#' ManualSnapshotRetentionPeriod = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_copy_cluster_snapshot
redshift_copy_cluster_snapshot <- function(SourceSnapshotIdentifier, SourceSnapshotClusterIdentifier = NULL, TargetSnapshotIdentifier, ManualSnapshotRetentionPeriod = NULL) {
  # Describe the CopyClusterSnapshot API call.
  # NOTE: the required TargetSnapshotIdentifier follows an optional argument;
  # this mirrors the generated AWS API signature, so callers typically pass
  # it by name.
  operation <- new_operation(
    name = "CopyClusterSnapshot",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the request parameters and prepare the output shape.
  op_input <- .redshift$copy_cluster_snapshot_input(
    SourceSnapshotIdentifier = SourceSnapshotIdentifier,
    SourceSnapshotClusterIdentifier = SourceSnapshotClusterIdentifier,
    TargetSnapshotIdentifier = TargetSnapshotIdentifier,
    ManualSnapshotRetentionPeriod = ManualSnapshotRetentionPeriod
  )
  op_output <- .redshift$copy_cluster_snapshot_output()
  # Build a client from the ambient configuration, then issue the request.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.redshift$operations$copy_cluster_snapshot <- redshift_copy_cluster_snapshot
#' Creates a new cluster with the specified parameters
#'
#' @description
#' Creates a new cluster with the specified parameters.
#'
#' To create a cluster in Virtual Private Cloud (VPC), you must provide a
#' cluster subnet group name. The cluster subnet group identifies the
#' subnets of your VPC that Amazon Redshift uses when creating the cluster.
#' For more information about managing clusters, go to [Amazon Redshift
#' Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_create_cluster(DBName, ClusterIdentifier, ClusterType,
#' NodeType, MasterUsername, MasterUserPassword, ClusterSecurityGroups,
#' VpcSecurityGroupIds, ClusterSubnetGroupName, AvailabilityZone,
#' PreferredMaintenanceWindow, ClusterParameterGroupName,
#' AutomatedSnapshotRetentionPeriod, ManualSnapshotRetentionPeriod, Port,
#' ClusterVersion, AllowVersionUpgrade, NumberOfNodes, PubliclyAccessible,
#' Encrypted, HsmClientCertificateIdentifier, HsmConfigurationIdentifier,
#' ElasticIp, Tags, KmsKeyId, EnhancedVpcRouting, AdditionalInfo, IamRoles,
#' MaintenanceTrackName, SnapshotScheduleIdentifier,
#' AvailabilityZoneRelocation)
#'
#' @param DBName The name of the first database to be created when the cluster is
#' created.
#'
#' To create additional databases after the cluster is created, connect to
#' the cluster with a SQL client and use SQL commands to create a database.
#' For more information, go to [Create a
#' Database](https://docs.aws.amazon.com/redshift/latest/gsg/t_creating_database.html)
#' in the Amazon Redshift Database Developer Guide.
#'
#' Default: `dev`
#'
#' Constraints:
#'
#' - Must contain 1 to 64 alphanumeric characters.
#'
#' - Must contain only lowercase letters.
#'
#' - Cannot be a word that is reserved by the service. A list of reserved
#' words can be found in [Reserved
#' Words](https://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html)
#' in the Amazon Redshift Database Developer Guide.
#' @param ClusterIdentifier [required] A unique identifier for the cluster. You use this identifier to refer to
#' the cluster for any subsequent cluster operations such as deleting or
#' modifying. The identifier also appears in the Amazon Redshift console.
#'
#' Constraints:
#'
#' - Must contain from 1 to 63 alphanumeric characters or hyphens.
#'
#' - Alphabetic characters must be lowercase.
#'
#' - First character must be a letter.
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens.
#'
#' - Must be unique for all clusters within an AWS account.
#'
#' Example: `myexamplecluster`
#' @param ClusterType The type of the cluster. When cluster type is specified as
#'
#' - `single-node`, the **NumberOfNodes** parameter is not required.
#'
#' - `multi-node`, the **NumberOfNodes** parameter is required.
#'
#' Valid Values: `multi-node` | `single-node`
#'
#' Default: `multi-node`
#' @param NodeType [required] The node type to be provisioned for the cluster. For information about
#' node types, go to [Working with
#' Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' Valid Values: `ds2.xlarge` | `ds2.8xlarge` | `dc1.large` | `dc1.8xlarge`
#' | `dc2.large` | `dc2.8xlarge` | `ra3.xlplus` | `ra3.4xlarge` |
#' `ra3.16xlarge`
#' @param MasterUsername [required] The user name associated with the master user account for the cluster
#' that is being created.
#'
#' Constraints:
#'
#' - Must be 1 - 128 alphanumeric characters. The user name can't be
#' `PUBLIC`.
#'
#' - First character must be a letter.
#'
#' - Cannot be a reserved word. A list of reserved words can be found in
#' [Reserved
#' Words](https://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html)
#' in the Amazon Redshift Database Developer Guide.
#' @param MasterUserPassword [required] The password associated with the master user account for the cluster
#' that is being created.
#'
#' Constraints:
#'
#' - Must be between 8 and 64 characters in length.
#'
#' - Must contain at least one uppercase letter.
#'
#' - Must contain at least one lowercase letter.
#'
#' - Must contain one number.
#'
#' - Can be any printable ASCII character (ASCII code 33 to 126) except '
#' (single quote), " (double quote), \\, /, @@, or space.
#' @param ClusterSecurityGroups A list of security groups to be associated with this cluster.
#'
#' Default: The default cluster security group for Amazon Redshift.
#' @param VpcSecurityGroupIds A list of Virtual Private Cloud (VPC) security groups to be associated
#' with the cluster.
#'
#' Default: The default VPC security group is associated with the cluster.
#' @param ClusterSubnetGroupName The name of a cluster subnet group to be associated with this cluster.
#'
#' If this parameter is not provided the resulting cluster will be deployed
#' outside virtual private cloud (VPC).
#' @param AvailabilityZone The EC2 Availability Zone (AZ) in which you want Amazon Redshift to
#' provision the cluster. For example, if you have several EC2 instances
#' running in a specific Availability Zone, then you might want the cluster
#' to be provisioned in the same zone in order to decrease network latency.
#'
#' Default: A random, system-chosen Availability Zone in the region that is
#' specified by the endpoint.
#'
#' Example: `us-east-2d`
#'
#' Constraint: The specified Availability Zone must be in the same region
#' as the current endpoint.
#' @param PreferredMaintenanceWindow The weekly time range (in UTC) during which automated cluster
#' maintenance can occur.
#'
#' Format: `ddd:hh24:mi-ddd:hh24:mi`
#'
#' Default: A 30-minute window selected at random from an 8-hour block of
#' time per region, occurring on a random day of the week. For more
#' information about the time blocks for each region, see [Maintenance
#' Windows](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-maintenance-windows)
#' in Amazon Redshift Cluster Management Guide.
#'
#' Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
#'
#' Constraints: Minimum 30-minute window.
#' @param ClusterParameterGroupName The name of the parameter group to be associated with this cluster.
#'
#' Default: The default Amazon Redshift cluster parameter group. For
#' information about the default parameter group, go to [Working with
#' Amazon Redshift Parameter
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
#'
#' Constraints:
#'
#' - Must be 1 to 255 alphanumeric characters or hyphens.
#'
#' - First character must be a letter.
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens.
#' @param AutomatedSnapshotRetentionPeriod The number of days that automated snapshots are retained. If the value
#' is 0, automated snapshots are disabled. Even if automated snapshots are
#' disabled, you can still create manual snapshots when you want with
#' [`create_cluster_snapshot`][redshift_create_cluster_snapshot].
#'
#' Default: `1`
#'
#' Constraints: Must be a value from 0 to 35.
#' @param ManualSnapshotRetentionPeriod The default number of days to retain a manual snapshot. If the value is
#' -1, the snapshot is retained indefinitely. This setting doesn't change
#' the retention period of existing snapshots.
#'
#' The value must be either -1 or an integer between 1 and 3,653.
#' @param Port The port number on which the cluster accepts incoming connections.
#'
#' The cluster is accessible only via the JDBC and ODBC connection strings.
#' Part of the connection string requires the port on which the cluster
#' will listen for incoming connections.
#'
#' Default: `5439`
#'
#' Valid Values: `1150-65535`
#' @param ClusterVersion The version of the Amazon Redshift engine software that you want to
#' deploy on the cluster.
#'
#' The version selected runs on all the nodes in the cluster.
#'
#' Constraints: Only version 1.0 is currently available.
#'
#' Example: `1.0`
#' @param AllowVersionUpgrade If `true`, major version upgrades can be applied during the maintenance
#' window to the Amazon Redshift engine that is running on the cluster.
#'
#' When a new major version of the Amazon Redshift engine is released, you
#' can request that the service automatically apply upgrades during the
#' maintenance window to the Amazon Redshift engine that is running on your
#' cluster.
#'
#' Default: `true`
#' @param NumberOfNodes The number of compute nodes in the cluster. This parameter is required
#' when the **ClusterType** parameter is specified as `multi-node`.
#'
#' For information about determining how many nodes you need, go to
#' [Working with
#' Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' If you don't specify this parameter, you get a single-node cluster. When
#' requesting a multi-node cluster, you must specify the number of nodes
#' that you want in the cluster.
#'
#' Default: `1`
#'
#' Constraints: Value must be at least 1 and no more than 100.
#' @param PubliclyAccessible If `true`, the cluster can be accessed from a public network.
#' @param Encrypted If `true`, the data in the cluster is encrypted at rest.
#'
#' Default: false
#' @param HsmClientCertificateIdentifier Specifies the name of the HSM client certificate the Amazon Redshift
#' cluster uses to retrieve the data encryption keys stored in an HSM.
#' @param HsmConfigurationIdentifier Specifies the name of the HSM configuration that contains the
#' information the Amazon Redshift cluster can use to retrieve and store
#' keys in an HSM.
#' @param ElasticIp The Elastic IP (EIP) address for the cluster.
#'
#' Constraints: The cluster must be provisioned in EC2-VPC and
#' publicly-accessible through an Internet gateway. For more information
#' about provisioning clusters in EC2-VPC, go to [Supported Platforms to
#' Launch Your
#' Cluster](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#cluster-platforms)
#' in the Amazon Redshift Cluster Management Guide.
#' @param Tags A list of tag instances.
#' @param KmsKeyId The AWS Key Management Service (KMS) key ID of the encryption key that
#' you want to use to encrypt data in the cluster.
#' @param EnhancedVpcRouting An option that specifies whether to create the cluster with enhanced VPC
#' routing enabled. To create a cluster that uses enhanced VPC routing, the
#' cluster must be in a VPC. For more information, see [Enhanced VPC
#' Routing](https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html)
#' in the Amazon Redshift Cluster Management Guide.
#'
#' If this option is `true`, enhanced VPC routing is enabled.
#'
#' Default: false
#' @param AdditionalInfo Reserved.
#' @param IamRoles A list of AWS Identity and Access Management (IAM) roles that can be
#' used by the cluster to access other AWS services. You must supply the
#' IAM roles in their Amazon Resource Name (ARN) format. You can supply up
#' to 10 IAM roles in a single request.
#'
#' A cluster can have up to 10 IAM roles associated with it at any time.
#' @param MaintenanceTrackName An optional parameter for the name of the maintenance track for the
#' cluster. If you don't provide a maintenance track name, the cluster is
#' assigned to the `current` track.
#' @param SnapshotScheduleIdentifier A unique identifier for the snapshot schedule.
#' @param AvailabilityZoneRelocation The option to enable relocation for an Amazon Redshift cluster between
#' Availability Zones after the cluster is created.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_cluster(
#' DBName = "string",
#' ClusterIdentifier = "string",
#' ClusterType = "string",
#' NodeType = "string",
#' MasterUsername = "string",
#' MasterUserPassword = "string",
#' ClusterSecurityGroups = list(
#' "string"
#' ),
#' VpcSecurityGroupIds = list(
#' "string"
#' ),
#' ClusterSubnetGroupName = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' ClusterParameterGroupName = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' Port = 123,
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' ElasticIp = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' AdditionalInfo = "string",
#' IamRoles = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' SnapshotScheduleIdentifier = "string",
#' AvailabilityZoneRelocation = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_cluster
redshift_create_cluster <- function(DBName = NULL, ClusterIdentifier, ClusterType = NULL, NodeType, MasterUsername, MasterUserPassword, ClusterSecurityGroups = NULL, VpcSecurityGroupIds = NULL, ClusterSubnetGroupName = NULL, AvailabilityZone = NULL, PreferredMaintenanceWindow = NULL, ClusterParameterGroupName = NULL, AutomatedSnapshotRetentionPeriod = NULL, ManualSnapshotRetentionPeriod = NULL, Port = NULL, ClusterVersion = NULL, AllowVersionUpgrade = NULL, NumberOfNodes = NULL, PubliclyAccessible = NULL, Encrypted = NULL, HsmClientCertificateIdentifier = NULL, HsmConfigurationIdentifier = NULL, ElasticIp = NULL, Tags = NULL, KmsKeyId = NULL, EnhancedVpcRouting = NULL, AdditionalInfo = NULL, IamRoles = NULL, MaintenanceTrackName = NULL, SnapshotScheduleIdentifier = NULL, AvailabilityZoneRelocation = NULL) {
  # Describe the CreateCluster API operation.
  operation <- new_operation(
    name = "CreateCluster",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller's arguments into the request shape.
  request_input <- .redshift$create_cluster_input(
    DBName = DBName,
    ClusterIdentifier = ClusterIdentifier,
    ClusterType = ClusterType,
    NodeType = NodeType,
    MasterUsername = MasterUsername,
    MasterUserPassword = MasterUserPassword,
    ClusterSecurityGroups = ClusterSecurityGroups,
    VpcSecurityGroupIds = VpcSecurityGroupIds,
    ClusterSubnetGroupName = ClusterSubnetGroupName,
    AvailabilityZone = AvailabilityZone,
    PreferredMaintenanceWindow = PreferredMaintenanceWindow,
    ClusterParameterGroupName = ClusterParameterGroupName,
    AutomatedSnapshotRetentionPeriod = AutomatedSnapshotRetentionPeriod,
    ManualSnapshotRetentionPeriod = ManualSnapshotRetentionPeriod,
    Port = Port,
    ClusterVersion = ClusterVersion,
    AllowVersionUpgrade = AllowVersionUpgrade,
    NumberOfNodes = NumberOfNodes,
    PubliclyAccessible = PubliclyAccessible,
    Encrypted = Encrypted,
    HsmClientCertificateIdentifier = HsmClientCertificateIdentifier,
    HsmConfigurationIdentifier = HsmConfigurationIdentifier,
    ElasticIp = ElasticIp,
    Tags = Tags,
    KmsKeyId = KmsKeyId,
    EnhancedVpcRouting = EnhancedVpcRouting,
    AdditionalInfo = AdditionalInfo,
    IamRoles = IamRoles,
    MaintenanceTrackName = MaintenanceTrackName,
    SnapshotScheduleIdentifier = SnapshotScheduleIdentifier,
    AvailabilityZoneRelocation = AvailabilityZoneRelocation
  )
  request_output <- .redshift$create_cluster_output()
  # Build a service client from the current configuration and send the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_input, request_output))
}
.redshift$operations$create_cluster <- redshift_create_cluster
#' Creates an Amazon Redshift parameter group
#'
#' @description
#' Creates an Amazon Redshift parameter group.
#'
#' Creating parameter groups is independent of creating clusters. You can
#' associate a cluster with a parameter group when you create the cluster.
#' You can also associate an existing cluster with a parameter group after
#' the cluster is created by using
#' [`modify_cluster`][redshift_modify_cluster].
#'
#' Parameters in the parameter group define specific behavior that applies
#' to the databases you create on the cluster. For more information about
#' parameters and parameter groups, go to [Amazon Redshift Parameter
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_create_cluster_parameter_group(ParameterGroupName,
#' ParameterGroupFamily, Description, Tags)
#'
#' @param ParameterGroupName [required] The name of the cluster parameter group.
#'
#' Constraints:
#'
#' - Must be 1 to 255 alphanumeric characters or hyphens
#'
#' - First character must be a letter.
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens.
#'
#' - Must be unique within your AWS account.
#'
#' This value is stored as a lower-case string.
#' @param ParameterGroupFamily [required] The Amazon Redshift engine version to which the cluster parameter group
#' applies. The cluster engine version determines the set of parameters.
#'
#' To get a list of valid parameter group family names, you can call
#' [`describe_cluster_parameter_groups`][redshift_describe_cluster_parameter_groups].
#' By default, Amazon Redshift returns a list of all the parameter groups
#' that are owned by your AWS account, including the default parameter
#' groups for each Amazon Redshift engine version. The parameter group
#' family names associated with the default parameter groups provide you
#' the valid values. For example, a valid family name is "redshift-1.0".
#' @param Description [required] A description of the parameter group.
#' @param Tags A list of tag instances.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ClusterParameterGroup = list(
#' ParameterGroupName = "string",
#' ParameterGroupFamily = "string",
#' Description = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_cluster_parameter_group(
#' ParameterGroupName = "string",
#' ParameterGroupFamily = "string",
#' Description = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_cluster_parameter_group
redshift_create_cluster_parameter_group <- function(ParameterGroupName, ParameterGroupFamily, Description, Tags = NULL) {
  # Describe the CreateClusterParameterGroup API operation.
  operation <- new_operation(
    name = "CreateClusterParameterGroup",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller's arguments into the request shape.
  request_input <- .redshift$create_cluster_parameter_group_input(
    ParameterGroupName = ParameterGroupName,
    ParameterGroupFamily = ParameterGroupFamily,
    Description = Description,
    Tags = Tags
  )
  request_output <- .redshift$create_cluster_parameter_group_output()
  # Build a service client from the current configuration and send the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_input, request_output))
}
.redshift$operations$create_cluster_parameter_group <- redshift_create_cluster_parameter_group
#' Creates a new Amazon Redshift security group
#'
#' @description
#' Creates a new Amazon Redshift security group. You use security groups to
#' control access to non-VPC clusters.
#'
#' For information about managing security groups, go to [Amazon Redshift
#' Cluster Security
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_create_cluster_security_group(ClusterSecurityGroupName,
#' Description, Tags)
#'
#' @param ClusterSecurityGroupName [required] The name for the security group. Amazon Redshift stores the value as a
#' lowercase string.
#'
#' Constraints:
#'
#' - Must contain no more than 255 alphanumeric characters or hyphens.
#'
#' - Must not be "Default".
#'
#' - Must be unique for all security groups that are created by your AWS
#' account.
#'
#' Example: `examplesecuritygroup`
#' @param Description [required] A description for the security group.
#' @param Tags A list of tag instances.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ClusterSecurityGroup = list(
#' ClusterSecurityGroupName = "string",
#' Description = "string",
#' EC2SecurityGroups = list(
#' list(
#' Status = "string",
#' EC2SecurityGroupName = "string",
#' EC2SecurityGroupOwnerId = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ),
#' IPRanges = list(
#' list(
#' Status = "string",
#' CIDRIP = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ),
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_cluster_security_group(
#' ClusterSecurityGroupName = "string",
#' Description = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_cluster_security_group
redshift_create_cluster_security_group <- function(ClusterSecurityGroupName, Description, Tags = NULL) {
  # Describe the CreateClusterSecurityGroup API operation.
  operation <- new_operation(
    name = "CreateClusterSecurityGroup",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller's arguments into the request shape.
  request_input <- .redshift$create_cluster_security_group_input(
    ClusterSecurityGroupName = ClusterSecurityGroupName,
    Description = Description,
    Tags = Tags
  )
  request_output <- .redshift$create_cluster_security_group_output()
  # Build a service client from the current configuration and send the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_input, request_output))
}
.redshift$operations$create_cluster_security_group <- redshift_create_cluster_security_group
#' Creates a manual snapshot of the specified cluster
#'
#' @description
#' Creates a manual snapshot of the specified cluster. The cluster must be
#' in the `available` state.
#'
#' For more information about working with snapshots, go to [Amazon
#' Redshift
#' Snapshots](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_create_cluster_snapshot(SnapshotIdentifier, ClusterIdentifier,
#' ManualSnapshotRetentionPeriod, Tags)
#'
#' @param SnapshotIdentifier [required] A unique identifier for the snapshot that you are requesting. This
#' identifier must be unique for all snapshots within the AWS account.
#'
#' Constraints:
#'
#' - Cannot be null, empty, or blank
#'
#' - Must contain from 1 to 255 alphanumeric characters or hyphens
#'
#' - First character must be a letter
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens
#'
#' Example: `my-snapshot-id`
#' @param ClusterIdentifier [required] The cluster identifier for which you want a snapshot.
#' @param ManualSnapshotRetentionPeriod The number of days that a manual snapshot is retained. If the value is
#' -1, the manual snapshot is retained indefinitely.
#'
#' The value must be either -1 or an integer between 1 and 3,653.
#'
#' The default value is -1.
#' @param Tags A list of tag instances.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Snapshot = list(
#' SnapshotIdentifier = "string",
#' ClusterIdentifier = "string",
#' SnapshotCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Status = "string",
#' Port = 123,
#' AvailabilityZone = "string",
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' MasterUsername = "string",
#' ClusterVersion = "string",
#' EngineFullVersion = "string",
#' SnapshotType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' DBName = "string",
#' VpcId = "string",
#' Encrypted = TRUE|FALSE,
#' KmsKeyId = "string",
#' EncryptedWithHSM = TRUE|FALSE,
#' AccountsWithRestoreAccess = list(
#' list(
#' AccountId = "string",
#' AccountAlias = "string"
#' )
#' ),
#' OwnerAccount = "string",
#' TotalBackupSizeInMegaBytes = 123.0,
#' ActualIncrementalBackupSizeInMegaBytes = 123.0,
#' BackupProgressInMegaBytes = 123.0,
#' CurrentBackupRateInMegaBytesPerSecond = 123.0,
#' EstimatedSecondsToCompletion = 123,
#' ElapsedTimeInSeconds = 123,
#' SourceRegion = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' RestorableNodeTypes = list(
#' "string"
#' ),
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' ManualSnapshotRetentionPeriod = 123,
#' ManualSnapshotRemainingDays = 123,
#' SnapshotRetentionStartTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_cluster_snapshot(
#' SnapshotIdentifier = "string",
#' ClusterIdentifier = "string",
#' ManualSnapshotRetentionPeriod = 123,
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_cluster_snapshot
redshift_create_cluster_snapshot <- function(SnapshotIdentifier, ClusterIdentifier, ManualSnapshotRetentionPeriod = NULL, Tags = NULL) {
  # Describe the CreateClusterSnapshot API operation.
  operation <- new_operation(
    name = "CreateClusterSnapshot",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller's arguments into the request shape.
  request_input <- .redshift$create_cluster_snapshot_input(
    SnapshotIdentifier = SnapshotIdentifier,
    ClusterIdentifier = ClusterIdentifier,
    ManualSnapshotRetentionPeriod = ManualSnapshotRetentionPeriod,
    Tags = Tags
  )
  request_output <- .redshift$create_cluster_snapshot_output()
  # Build a service client from the current configuration and send the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_input, request_output))
}
.redshift$operations$create_cluster_snapshot <- redshift_create_cluster_snapshot
#' Creates a new Amazon Redshift subnet group
#'
#' @description
#' Creates a new Amazon Redshift subnet group. You must provide a list of
#' one or more subnets in your existing Amazon Virtual Private Cloud
#' (Amazon VPC) when creating an Amazon Redshift subnet group.
#'
#' For information about subnet groups, go to [Amazon Redshift Cluster
#' Subnet
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-cluster-subnet-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_create_cluster_subnet_group(ClusterSubnetGroupName,
#' Description, SubnetIds, Tags)
#'
#' @param ClusterSubnetGroupName [required] The name for the subnet group. Amazon Redshift stores the value as a
#' lowercase string.
#'
#' Constraints:
#'
#' - Must contain no more than 255 alphanumeric characters or hyphens.
#'
#' - Must not be "Default".
#'
#' - Must be unique for all subnet groups that are created by your AWS
#' account.
#'
#' Example: `examplesubnetgroup`
#' @param Description [required] A description for the subnet group.
#' @param SubnetIds [required] An array of VPC subnet IDs. A maximum of 20 subnets can be modified in a
#' single request.
#' @param Tags A list of tag instances.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ClusterSubnetGroup = list(
#' ClusterSubnetGroupName = "string",
#' Description = "string",
#' VpcId = "string",
#' SubnetGroupStatus = "string",
#' Subnets = list(
#' list(
#' SubnetIdentifier = "string",
#' SubnetAvailabilityZone = list(
#' Name = "string",
#' SupportedPlatforms = list(
#' list(
#' Name = "string"
#' )
#' )
#' ),
#' SubnetStatus = "string"
#' )
#' ),
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_cluster_subnet_group(
#' ClusterSubnetGroupName = "string",
#' Description = "string",
#' SubnetIds = list(
#' "string"
#' ),
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_cluster_subnet_group
redshift_create_cluster_subnet_group <- function(ClusterSubnetGroupName, Description, SubnetIds, Tags = NULL) {
  # Describe the CreateClusterSubnetGroup API operation.
  operation <- new_operation(
    name = "CreateClusterSubnetGroup",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller's arguments into the request shape.
  request_input <- .redshift$create_cluster_subnet_group_input(
    ClusterSubnetGroupName = ClusterSubnetGroupName,
    Description = Description,
    SubnetIds = SubnetIds,
    Tags = Tags
  )
  request_output <- .redshift$create_cluster_subnet_group_output()
  # Build a service client from the current configuration and send the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_input, request_output))
}
.redshift$operations$create_cluster_subnet_group <- redshift_create_cluster_subnet_group
#' Creates an Amazon Redshift event notification subscription
#'
#' @description
#' Creates an Amazon Redshift event notification subscription. This action
#' requires an ARN (Amazon Resource Name) of an Amazon SNS topic created by
#' either the Amazon Redshift console, the Amazon SNS console, or the
#' Amazon SNS API. To obtain an ARN with Amazon SNS, you must create a
#' topic in Amazon SNS and subscribe to the topic. The ARN is displayed in
#' the SNS console.
#'
#' You can specify the source type, and lists of Amazon Redshift source
#' IDs, event categories, and event severities. Notifications will be sent
#' for all events you want that match those criteria. For example, you can
#' specify source type = cluster, source ID = my-cluster-1 and mycluster2,
#' event categories = Availability, Backup, and severity = ERROR. The
#' subscription will only send notifications for those ERROR events in the
#' Availability and Backup categories for the specified clusters.
#'
#' If you specify both the source type and source IDs, such as source type
#' = cluster and source identifier = my-cluster-1, notifications will be
#' sent for all the cluster events for my-cluster-1. If you specify a
#' source type but do not specify a source identifier, you will receive
#' notice of the events for the objects of that type in your AWS account.
#' If you specify neither the SourceType nor the SourceIdentifier,
#' you will be notified of events generated from all Amazon Redshift
#' sources belonging to your AWS account. You must specify a source type if
#' you specify a source ID.
#'
#' @usage
#' redshift_create_event_subscription(SubscriptionName, SnsTopicArn,
#' SourceType, SourceIds, EventCategories, Severity, Enabled, Tags)
#'
#' @param SubscriptionName [required] The name of the event subscription to be created.
#'
#' Constraints:
#'
#' - Cannot be null, empty, or blank.
#'
#' - Must contain from 1 to 255 alphanumeric characters or hyphens.
#'
#' - First character must be a letter.
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens.
#' @param SnsTopicArn [required] The Amazon Resource Name (ARN) of the Amazon SNS topic used to transmit
#' the event notifications. The ARN is created by Amazon SNS when you
#' create a topic and subscribe to it.
#' @param SourceType The type of source that will be generating the events. For example, if
#' you want to be notified of events generated by a cluster, you would set
#' this parameter to cluster. If this value is not specified, events are
#' returned for all Amazon Redshift objects in your AWS account. You must
#' specify a source type in order to specify source IDs.
#'
#' Valid values: cluster, cluster-parameter-group, cluster-security-group,
#' cluster-snapshot, and scheduled-action.
#' @param SourceIds A list of one or more identifiers of Amazon Redshift source objects. All
#' of the objects must be of the same type as was specified in the source
#' type parameter. The event subscription will return only events generated
#' by the specified objects. If not specified, then events are returned for
#' all objects within the source type specified.
#'
#' Example: my-cluster-1, my-cluster-2
#'
#' Example: my-snapshot-20131010
#' @param EventCategories Specifies the Amazon Redshift event categories to be published by the
#' event notification subscription.
#'
#' Values: configuration, management, monitoring, security
#' @param Severity Specifies the Amazon Redshift event severity to be published by the
#' event notification subscription.
#'
#' Values: ERROR, INFO
#' @param Enabled A boolean value; set to `true` to activate the subscription, and set to
#' `false` to create the subscription but not activate it.
#' @param Tags A list of tag instances.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' EventSubscription = list(
#' CustomerAwsId = "string",
#' CustSubscriptionId = "string",
#' SnsTopicArn = "string",
#' Status = "string",
#' SubscriptionCreationTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' SourceType = "string",
#' SourceIdsList = list(
#' "string"
#' ),
#' EventCategoriesList = list(
#' "string"
#' ),
#' Severity = "string",
#' Enabled = TRUE|FALSE,
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_event_subscription(
#' SubscriptionName = "string",
#' SnsTopicArn = "string",
#' SourceType = "string",
#' SourceIds = list(
#' "string"
#' ),
#' EventCategories = list(
#' "string"
#' ),
#' Severity = "string",
#' Enabled = TRUE|FALSE,
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_event_subscription
redshift_create_event_subscription <- function(SubscriptionName, SnsTopicArn, SourceType = NULL, SourceIds = NULL, EventCategories = NULL, Severity = NULL, Enabled = NULL, Tags = NULL) {
  # Describe the CreateEventSubscription API operation.
  operation <- new_operation(
    name = "CreateEventSubscription",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller's arguments into the request shape.
  request_input <- .redshift$create_event_subscription_input(
    SubscriptionName = SubscriptionName,
    SnsTopicArn = SnsTopicArn,
    SourceType = SourceType,
    SourceIds = SourceIds,
    EventCategories = EventCategories,
    Severity = Severity,
    Enabled = Enabled,
    Tags = Tags
  )
  request_output <- .redshift$create_event_subscription_output()
  # Build a service client from the current configuration and send the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_input, request_output))
}
.redshift$operations$create_event_subscription <- redshift_create_event_subscription
#' Creates an HSM client certificate that an Amazon Redshift cluster will
#' use to connect to the client's HSM in order to store and retrieve the
#' keys used to encrypt the cluster databases
#'
#' @description
#' Creates an HSM client certificate that an Amazon Redshift cluster will
#' use to connect to the client's HSM in order to store and retrieve the
#' keys used to encrypt the cluster databases.
#'
#' The command returns a public key, which you must store in the HSM. In
#' addition to creating the HSM certificate, you must create an Amazon
#' Redshift HSM configuration that provides a cluster the information
#' needed to store and use encryption keys in the HSM. For more
#' information, go to [Hardware Security
#' Modules](https://docs.aws.amazon.com/redshift/latest/mgmt/) in the
#' Amazon Redshift Cluster Management Guide.
#'
#' @usage
#' redshift_create_hsm_client_certificate(HsmClientCertificateIdentifier,
#' Tags)
#'
#' @param HsmClientCertificateIdentifier [required] The identifier to be assigned to the new HSM client certificate that the
#' cluster will use to connect to the HSM to use the database encryption
#' keys.
#' @param Tags A list of tag instances.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' HsmClientCertificate = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmClientCertificatePublicKey = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_hsm_client_certificate(
#' HsmClientCertificateIdentifier = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_hsm_client_certificate
redshift_create_hsm_client_certificate <- function(HsmClientCertificateIdentifier, Tags = NULL) {
  # Describe the CreateHsmClientCertificate API operation.
  operation <- new_operation(
    name = "CreateHsmClientCertificate",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller's arguments into the request shape.
  request_input <- .redshift$create_hsm_client_certificate_input(
    HsmClientCertificateIdentifier = HsmClientCertificateIdentifier,
    Tags = Tags
  )
  request_output <- .redshift$create_hsm_client_certificate_output()
  # Build a service client from the current configuration and send the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_input, request_output))
}
.redshift$operations$create_hsm_client_certificate <- redshift_create_hsm_client_certificate
#' Creates an HSM configuration that contains the information required by
#' an Amazon Redshift cluster to store and use database encryption keys in
#' a Hardware Security Module (HSM)
#'
#' @description
#' Creates an HSM configuration that contains the information required by
#' an Amazon Redshift cluster to store and use database encryption keys in
#' a Hardware Security Module (HSM). After creating the HSM configuration,
#' you can specify it as a parameter when creating a cluster. The cluster
#' will then store its encryption keys in the HSM.
#'
#' In addition to creating an HSM configuration, you must also create an
#' HSM client certificate. For more information, go to [Hardware Security
#' Modules](https://docs.aws.amazon.com/redshift/latest/mgmt/) in the
#' Amazon Redshift Cluster Management Guide.
#'
#' @usage
#' redshift_create_hsm_configuration(HsmConfigurationIdentifier,
#' Description, HsmIpAddress, HsmPartitionName, HsmPartitionPassword,
#' HsmServerPublicCertificate, Tags)
#'
#' @param HsmConfigurationIdentifier [required] The identifier to be assigned to the new Amazon Redshift HSM
#' configuration.
#' @param Description [required] A text description of the HSM configuration to be created.
#' @param HsmIpAddress [required] The IP address that the Amazon Redshift cluster must use to access the
#' HSM.
#' @param HsmPartitionName [required] The name of the partition in the HSM where the Amazon Redshift clusters
#' will store their database encryption keys.
#' @param HsmPartitionPassword [required] The password required to access the HSM partition.
#' @param HsmServerPublicCertificate [required] The HSM's public certificate file. When using Cloud HSM, the file name is
#' server.pem.
#' @param Tags A list of tag instances.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' HsmConfiguration = list(
#' HsmConfigurationIdentifier = "string",
#' Description = "string",
#' HsmIpAddress = "string",
#' HsmPartitionName = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_hsm_configuration(
#' HsmConfigurationIdentifier = "string",
#' Description = "string",
#' HsmIpAddress = "string",
#' HsmPartitionName = "string",
#' HsmPartitionPassword = "string",
#' HsmServerPublicCertificate = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_hsm_configuration
redshift_create_hsm_configuration <- function(HsmConfigurationIdentifier, Description, HsmIpAddress, HsmPartitionName, HsmPartitionPassword, HsmServerPublicCertificate, Tags = NULL) {
  # Describe the CreateHsmConfiguration API operation.
  operation <- new_operation(
    name = "CreateHsmConfiguration",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller's arguments into the request shape.
  request_input <- .redshift$create_hsm_configuration_input(
    HsmConfigurationIdentifier = HsmConfigurationIdentifier,
    Description = Description,
    HsmIpAddress = HsmIpAddress,
    HsmPartitionName = HsmPartitionName,
    HsmPartitionPassword = HsmPartitionPassword,
    HsmServerPublicCertificate = HsmServerPublicCertificate,
    Tags = Tags
  )
  request_output <- .redshift$create_hsm_configuration_output()
  # Build a service client from the current configuration and send the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_input, request_output))
}
.redshift$operations$create_hsm_configuration <- redshift_create_hsm_configuration
#' Creates a scheduled action
#'
#' @description
#' Creates a scheduled action. A scheduled action contains a schedule and
#' an Amazon Redshift API action. For example, you can create a schedule of
#' when to run the [`resize_cluster`][redshift_resize_cluster] API
#' operation.
#'
#' @usage
#' redshift_create_scheduled_action(ScheduledActionName, TargetAction,
#' Schedule, IamRole, ScheduledActionDescription, StartTime, EndTime,
#' Enable)
#'
#' @param ScheduledActionName [required] The name of the scheduled action. The name must be unique within an
#' account. For more information about this parameter, see ScheduledAction.
#' @param TargetAction [required] A JSON format string of the Amazon Redshift API operation with input
#' parameters. For more information about this parameter, see
#' ScheduledAction.
#' @param Schedule [required] The schedule in `at( )` or `cron( )` format. For more information about
#' this parameter, see ScheduledAction.
#' @param IamRole [required] The IAM role to assume to run the target action. For more information
#' about this parameter, see ScheduledAction.
#' @param ScheduledActionDescription The description of the scheduled action.
#' @param StartTime The start time in UTC of the scheduled action. Before this time, the
#' scheduled action does not trigger. For more information about this
#' parameter, see ScheduledAction.
#' @param EndTime The end time in UTC of the scheduled action. After this time, the
#' scheduled action does not trigger. For more information about this
#' parameter, see ScheduledAction.
#' @param Enable If true, the schedule is enabled. If false, the scheduled action does
#' not trigger. For more information about `state` of the scheduled action,
#' see ScheduledAction.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ScheduledActionName = "string",
#' TargetAction = list(
#' ResizeCluster = list(
#' ClusterIdentifier = "string",
#' ClusterType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' Classic = TRUE|FALSE
#' ),
#' PauseCluster = list(
#' ClusterIdentifier = "string"
#' ),
#' ResumeCluster = list(
#' ClusterIdentifier = "string"
#' )
#' ),
#' Schedule = "string",
#' IamRole = "string",
#' ScheduledActionDescription = "string",
#' State = "ACTIVE"|"DISABLED",
#' NextInvocations = list(
#' as.POSIXct(
#' "2015-01-01"
#' )
#' ),
#' StartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' EndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_scheduled_action(
#' ScheduledActionName = "string",
#' TargetAction = list(
#' ResizeCluster = list(
#' ClusterIdentifier = "string",
#' ClusterType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' Classic = TRUE|FALSE
#' ),
#' PauseCluster = list(
#' ClusterIdentifier = "string"
#' ),
#' ResumeCluster = list(
#' ClusterIdentifier = "string"
#' )
#' ),
#' Schedule = "string",
#' IamRole = "string",
#' ScheduledActionDescription = "string",
#' StartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' EndTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Enable = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_scheduled_action
redshift_create_scheduled_action <- function(ScheduledActionName, TargetAction, Schedule, IamRole, ScheduledActionDescription = NULL, StartTime = NULL, EndTime = NULL, Enable = NULL) {
  # Describe the CreateScheduledAction operation for the request builder.
  operation <- new_operation(
    name = "CreateScheduledAction",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .redshift$create_scheduled_action_input(
    ScheduledActionName = ScheduledActionName,
    TargetAction = TargetAction,
    Schedule = Schedule,
    IamRole = IamRole,
    ScheduledActionDescription = ScheduledActionDescription,
    StartTime = StartTime,
    EndTime = EndTime,
    Enable = Enable
  )
  response_shape <- .redshift$create_scheduled_action_output()
  # Build a client from the active configuration, then dispatch the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.redshift$operations$create_scheduled_action <- redshift_create_scheduled_action
#' Creates a snapshot copy grant that permits Amazon Redshift to use a
#' customer master key (CMK) from AWS Key Management Service (AWS KMS) to
#' encrypt copied snapshots in a destination region
#'
#' @description
#' Creates a snapshot copy grant that permits Amazon Redshift to use a
#' customer master key (CMK) from AWS Key Management Service (AWS KMS) to
#' encrypt copied snapshots in a destination region.
#'
#' For more information about managing snapshot copy grants, go to [Amazon
#' Redshift Database
#' Encryption](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_create_snapshot_copy_grant(SnapshotCopyGrantName, KmsKeyId,
#' Tags)
#'
#' @param SnapshotCopyGrantName [required] The name of the snapshot copy grant. This name must be unique in the
#' region for the AWS account.
#'
#' Constraints:
#'
#' - Must contain from 1 to 63 alphanumeric characters or hyphens.
#'
#' - Alphabetic characters must be lowercase.
#'
#' - First character must be a letter.
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens.
#'
#' - Must be unique for all clusters within an AWS account.
#' @param KmsKeyId The unique identifier of the customer master key (CMK) to which to grant
#' Amazon Redshift permission. If no key is specified, the default key is
#' used.
#' @param Tags A list of tag instances.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' SnapshotCopyGrant = list(
#' SnapshotCopyGrantName = "string",
#' KmsKeyId = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_snapshot_copy_grant(
#' SnapshotCopyGrantName = "string",
#' KmsKeyId = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_snapshot_copy_grant
redshift_create_snapshot_copy_grant <- function(SnapshotCopyGrantName, KmsKeyId = NULL, Tags = NULL) {
  # Describe the CreateSnapshotCopyGrant operation for the request builder.
  operation <- new_operation(
    name = "CreateSnapshotCopyGrant",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .redshift$create_snapshot_copy_grant_input(
    SnapshotCopyGrantName = SnapshotCopyGrantName,
    KmsKeyId = KmsKeyId,
    Tags = Tags
  )
  response_shape <- .redshift$create_snapshot_copy_grant_output()
  # Build a client from the active configuration, then dispatch the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.redshift$operations$create_snapshot_copy_grant <- redshift_create_snapshot_copy_grant
#' Create a snapshot schedule that can be associated to a cluster and which
#' overrides the default system backup schedule
#'
#' @description
#' Create a snapshot schedule that can be associated to a cluster and which
#' overrides the default system backup schedule.
#'
#' @usage
#' redshift_create_snapshot_schedule(ScheduleDefinitions,
#' ScheduleIdentifier, ScheduleDescription, Tags, DryRun, NextInvocations)
#'
#' @param ScheduleDefinitions The definition of the snapshot schedule. The definition is made up of
#' schedule expressions, for example "cron(30 12 *)" or "rate(12 hours)".
#' @param ScheduleIdentifier A unique identifier for a snapshot schedule. Only alphanumeric
#' characters are allowed for the identifier.
#' @param ScheduleDescription The description of the snapshot schedule.
#' @param Tags An optional set of tags you can use to search for the schedule.
#' @param DryRun
#' @param NextInvocations
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ScheduleDefinitions = list(
#' "string"
#' ),
#' ScheduleIdentifier = "string",
#' ScheduleDescription = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' NextInvocations = list(
#' as.POSIXct(
#' "2015-01-01"
#' )
#' ),
#' AssociatedClusterCount = 123,
#' AssociatedClusters = list(
#' list(
#' ClusterIdentifier = "string",
#' ScheduleAssociationState = "MODIFYING"|"ACTIVE"|"FAILED"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_snapshot_schedule(
#' ScheduleDefinitions = list(
#' "string"
#' ),
#' ScheduleIdentifier = "string",
#' ScheduleDescription = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' DryRun = TRUE|FALSE,
#' NextInvocations = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_snapshot_schedule
redshift_create_snapshot_schedule <- function(ScheduleDefinitions = NULL, ScheduleIdentifier = NULL, ScheduleDescription = NULL, Tags = NULL, DryRun = NULL, NextInvocations = NULL) {
  # Describe the CreateSnapshotSchedule operation for the request builder.
  operation <- new_operation(
    name = "CreateSnapshotSchedule",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .redshift$create_snapshot_schedule_input(
    ScheduleDefinitions = ScheduleDefinitions,
    ScheduleIdentifier = ScheduleIdentifier,
    ScheduleDescription = ScheduleDescription,
    Tags = Tags,
    DryRun = DryRun,
    NextInvocations = NextInvocations
  )
  response_shape <- .redshift$create_snapshot_schedule_output()
  # Build a client from the active configuration, then dispatch the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.redshift$operations$create_snapshot_schedule <- redshift_create_snapshot_schedule
#' Adds tags to a cluster
#'
#' @description
#' Adds tags to a cluster.
#'
#' A resource can have up to 50 tags. If you try to create more than 50
#' tags for a resource, you will receive an error and the attempt will
#' fail.
#'
#' If you specify a key that already exists for the resource, the value for
#' that key will be updated with the new value.
#'
#' @usage
#' redshift_create_tags(ResourceName, Tags)
#'
#' @param ResourceName [required] The Amazon Resource Name (ARN) to which you want to add the tag or tags.
#' For example, `arn:aws:redshift:us-east-2:123456789:cluster:t1`.
#' @param Tags [required] One or more name/value pairs to add as tags to the specified resource.
#' Each tag name is passed in with the parameter `Key` and the
#' corresponding value is passed in with the parameter `Value`. The `Key`
#' and `Value` parameters are separated by a comma (,). Separate multiple
#' tags with a space. For example,
#' `--tags "Key"="owner","Value"="admin" "Key"="environment","Value"="test" "Key"="version","Value"="1.0"`.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$create_tags(
#' ResourceName = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_tags
redshift_create_tags <- function(ResourceName, Tags) {
  # Describe the CreateTags operation for the request builder.
  operation <- new_operation(
    name = "CreateTags",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .redshift$create_tags_input(
    ResourceName = ResourceName,
    Tags = Tags
  )
  response_shape <- .redshift$create_tags_output()
  # Build a client from the active configuration, then dispatch the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.redshift$operations$create_tags <- redshift_create_tags
#' Creates a usage limit for a specified Amazon Redshift feature on a
#' cluster
#'
#' @description
#' Creates a usage limit for a specified Amazon Redshift feature on a
#' cluster. The usage limit is identified by the returned usage limit
#' identifier.
#'
#' @usage
#' redshift_create_usage_limit(ClusterIdentifier, FeatureType, LimitType,
#' Amount, Period, BreachAction, Tags)
#'
#' @param ClusterIdentifier [required] The identifier of the cluster that you want to limit usage.
#' @param FeatureType [required] The Amazon Redshift feature that you want to limit.
#' @param LimitType [required] The type of limit. Depending on the feature type, this can be based on a
#' time duration or data size. If `FeatureType` is `spectrum`, then
#' `LimitType` must be `data-scanned`. If `FeatureType` is
#' `concurrency-scaling`, then `LimitType` must be `time`.
#' @param Amount [required] The limit amount. If time-based, this amount is in minutes. If
#' data-based, this amount is in terabytes (TB). The value must be a
#' positive number.
#' @param Period The time period that the amount applies to. A `weekly` period begins on
#' Sunday. The default is `monthly`.
#' @param BreachAction The action that Amazon Redshift takes when the limit is reached. The
#' default is log. For more information about this parameter, see
#' UsageLimit.
#' @param Tags A list of tag instances.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' UsageLimitId = "string",
#' ClusterIdentifier = "string",
#' FeatureType = "spectrum"|"concurrency-scaling",
#' LimitType = "time"|"data-scanned",
#' Amount = 123,
#' Period = "daily"|"weekly"|"monthly",
#' BreachAction = "log"|"emit-metric"|"disable",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_usage_limit(
#' ClusterIdentifier = "string",
#' FeatureType = "spectrum"|"concurrency-scaling",
#' LimitType = "time"|"data-scanned",
#' Amount = 123,
#' Period = "daily"|"weekly"|"monthly",
#' BreachAction = "log"|"emit-metric"|"disable",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_usage_limit
redshift_create_usage_limit <- function(ClusterIdentifier, FeatureType, LimitType, Amount, Period = NULL, BreachAction = NULL, Tags = NULL) {
  # Describe the CreateUsageLimit operation for the request builder.
  operation <- new_operation(
    name = "CreateUsageLimit",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .redshift$create_usage_limit_input(
    ClusterIdentifier = ClusterIdentifier,
    FeatureType = FeatureType,
    LimitType = LimitType,
    Amount = Amount,
    Period = Period,
    BreachAction = BreachAction,
    Tags = Tags
  )
  response_shape <- .redshift$create_usage_limit_output()
  # Build a client from the active configuration, then dispatch the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.redshift$operations$create_usage_limit <- redshift_create_usage_limit
#' Deletes a previously provisioned cluster without its final snapshot
#' being created
#'
#' @description
#' Deletes a previously provisioned cluster without its final snapshot
#' being created. A successful response from the web service indicates that
#' the request was received correctly. Use
#' [`describe_clusters`][redshift_describe_clusters] to monitor the status
#' of the deletion. The delete operation cannot be canceled or reverted
#' once submitted. For more information about managing clusters, go to
#' [Amazon Redshift
#' Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' If you want to shut down the cluster and retain it for future use, set
#' *SkipFinalClusterSnapshot* to `false` and specify a name for
#' *FinalClusterSnapshotIdentifier*. You can later restore this snapshot to
#' resume using the cluster. If a final cluster snapshot is requested, the
#' status of the cluster will be "final-snapshot" while the snapshot is
#' being taken, then it's "deleting" once Amazon Redshift begins deleting
#' the cluster.
#'
#' For more information about managing clusters, go to [Amazon Redshift
#' Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_delete_cluster(ClusterIdentifier, SkipFinalClusterSnapshot,
#' FinalClusterSnapshotIdentifier, FinalClusterSnapshotRetentionPeriod)
#'
#' @param ClusterIdentifier [required] The identifier of the cluster to be deleted.
#'
#' Constraints:
#'
#' - Must contain lowercase characters.
#'
#' - Must contain from 1 to 63 alphanumeric characters or hyphens.
#'
#' - First character must be a letter.
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens.
#' @param SkipFinalClusterSnapshot Determines whether a final snapshot of the cluster is created before
#' Amazon Redshift deletes the cluster. If `true`, a final cluster snapshot
#' is not created. If `false`, a final cluster snapshot is created before
#' the cluster is deleted.
#'
#' The *FinalClusterSnapshotIdentifier* parameter must be specified if
#' *SkipFinalClusterSnapshot* is `false`.
#'
#' Default: `false`
#' @param FinalClusterSnapshotIdentifier The identifier of the final snapshot that is to be created immediately
#' before deleting the cluster. If this parameter is provided,
#' *SkipFinalClusterSnapshot* must be `false`.
#'
#' Constraints:
#'
#' - Must be 1 to 255 alphanumeric characters.
#'
#' - First character must be a letter.
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens.
#' @param FinalClusterSnapshotRetentionPeriod The number of days that a manual snapshot is retained. If the value is
#' -1, the manual snapshot is retained indefinitely.
#'
#' The value must be either -1 or an integer between 1 and 3,653.
#'
#' The default value is -1.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$delete_cluster(
#' ClusterIdentifier = "string",
#' SkipFinalClusterSnapshot = TRUE|FALSE,
#' FinalClusterSnapshotIdentifier = "string",
#' FinalClusterSnapshotRetentionPeriod = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_cluster
redshift_delete_cluster <- function(ClusterIdentifier, SkipFinalClusterSnapshot = NULL, FinalClusterSnapshotIdentifier = NULL, FinalClusterSnapshotRetentionPeriod = NULL) {
  # Describe the DeleteCluster operation for the request builder.
  operation <- new_operation(
    name = "DeleteCluster",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .redshift$delete_cluster_input(
    ClusterIdentifier = ClusterIdentifier,
    SkipFinalClusterSnapshot = SkipFinalClusterSnapshot,
    FinalClusterSnapshotIdentifier = FinalClusterSnapshotIdentifier,
    FinalClusterSnapshotRetentionPeriod = FinalClusterSnapshotRetentionPeriod
  )
  response_shape <- .redshift$delete_cluster_output()
  # Build a client from the active configuration, then dispatch the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.redshift$operations$delete_cluster <- redshift_delete_cluster
#' Deletes a specified Amazon Redshift parameter group
#'
#' @description
#' Deletes a specified Amazon Redshift parameter group.
#'
#' You cannot delete a parameter group if it is associated with a cluster.
#'
#' @usage
#' redshift_delete_cluster_parameter_group(ParameterGroupName)
#'
#' @param ParameterGroupName [required] The name of the parameter group to be deleted.
#'
#' Constraints:
#'
#' - Must be the name of an existing cluster parameter group.
#'
#' - Cannot delete a default cluster parameter group.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_cluster_parameter_group(
#' ParameterGroupName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_cluster_parameter_group
redshift_delete_cluster_parameter_group <- function(ParameterGroupName) {
  # Describe the DeleteClusterParameterGroup operation for the request builder.
  operation <- new_operation(
    name = "DeleteClusterParameterGroup",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .redshift$delete_cluster_parameter_group_input(
    ParameterGroupName = ParameterGroupName
  )
  response_shape <- .redshift$delete_cluster_parameter_group_output()
  # Build a client from the active configuration, then dispatch the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.redshift$operations$delete_cluster_parameter_group <- redshift_delete_cluster_parameter_group
#' Deletes an Amazon Redshift security group
#'
#' @description
#' Deletes an Amazon Redshift security group.
#'
#' You cannot delete a security group that is associated with any clusters.
#' You cannot delete the default security group.
#'
#' For information about managing security groups, go to [Amazon Redshift
#' Cluster Security
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_delete_cluster_security_group(ClusterSecurityGroupName)
#'
#' @param ClusterSecurityGroupName [required] The name of the cluster security group to be deleted.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_cluster_security_group(
#' ClusterSecurityGroupName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_cluster_security_group
redshift_delete_cluster_security_group <- function(ClusterSecurityGroupName) {
  # Describe the DeleteClusterSecurityGroup operation for the request builder.
  operation <- new_operation(
    name = "DeleteClusterSecurityGroup",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .redshift$delete_cluster_security_group_input(
    ClusterSecurityGroupName = ClusterSecurityGroupName
  )
  response_shape <- .redshift$delete_cluster_security_group_output()
  # Build a client from the active configuration, then dispatch the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.redshift$operations$delete_cluster_security_group <- redshift_delete_cluster_security_group
#' Deletes the specified manual snapshot
#'
#' @description
#' Deletes the specified manual snapshot. The snapshot must be in the
#' `available` state, with no other users authorized to access the
#' snapshot.
#'
#' Unlike automated snapshots, manual snapshots are retained even after you
#' delete your cluster. Amazon Redshift does not delete your manual
#' snapshots. You must delete manual snapshot explicitly to avoid getting
#' charged. If other accounts are authorized to access the snapshot, you
#' must revoke all of the authorizations before you can delete the
#' snapshot.
#'
#' @usage
#' redshift_delete_cluster_snapshot(SnapshotIdentifier,
#' SnapshotClusterIdentifier)
#'
#' @param SnapshotIdentifier [required] The unique identifier of the manual snapshot to be deleted.
#'
#' Constraints: Must be the name of an existing snapshot that is in the
#' `available`, `failed`, or `cancelled` state.
#' @param SnapshotClusterIdentifier The unique identifier of the cluster the snapshot was created from. This
#' parameter is required if your IAM user has a policy containing a
#' snapshot resource element that specifies anything other than * for the
#' cluster name.
#'
#' Constraints: Must be the name of valid cluster.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Snapshot = list(
#' SnapshotIdentifier = "string",
#' ClusterIdentifier = "string",
#' SnapshotCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Status = "string",
#' Port = 123,
#' AvailabilityZone = "string",
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' MasterUsername = "string",
#' ClusterVersion = "string",
#' EngineFullVersion = "string",
#' SnapshotType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' DBName = "string",
#' VpcId = "string",
#' Encrypted = TRUE|FALSE,
#' KmsKeyId = "string",
#' EncryptedWithHSM = TRUE|FALSE,
#' AccountsWithRestoreAccess = list(
#' list(
#' AccountId = "string",
#' AccountAlias = "string"
#' )
#' ),
#' OwnerAccount = "string",
#' TotalBackupSizeInMegaBytes = 123.0,
#' ActualIncrementalBackupSizeInMegaBytes = 123.0,
#' BackupProgressInMegaBytes = 123.0,
#' CurrentBackupRateInMegaBytesPerSecond = 123.0,
#' EstimatedSecondsToCompletion = 123,
#' ElapsedTimeInSeconds = 123,
#' SourceRegion = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' RestorableNodeTypes = list(
#' "string"
#' ),
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' ManualSnapshotRetentionPeriod = 123,
#' ManualSnapshotRemainingDays = 123,
#' SnapshotRetentionStartTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$delete_cluster_snapshot(
#' SnapshotIdentifier = "string",
#' SnapshotClusterIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_cluster_snapshot
redshift_delete_cluster_snapshot <- function(SnapshotIdentifier, SnapshotClusterIdentifier = NULL) {
  # Describe the DeleteClusterSnapshot operation for the request builder.
  operation <- new_operation(
    name = "DeleteClusterSnapshot",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .redshift$delete_cluster_snapshot_input(
    SnapshotIdentifier = SnapshotIdentifier,
    SnapshotClusterIdentifier = SnapshotClusterIdentifier
  )
  response_shape <- .redshift$delete_cluster_snapshot_output()
  # Build a client from the active configuration, then dispatch the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.redshift$operations$delete_cluster_snapshot <- redshift_delete_cluster_snapshot
#' Deletes the specified cluster subnet group
#'
#' @description
#' Deletes the specified cluster subnet group.
#'
#' @usage
#' redshift_delete_cluster_subnet_group(ClusterSubnetGroupName)
#'
#' @param ClusterSubnetGroupName [required] The name of the cluster subnet group name to be deleted.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_cluster_subnet_group(
#' ClusterSubnetGroupName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_cluster_subnet_group
redshift_delete_cluster_subnet_group <- function(ClusterSubnetGroupName) {
  # Describe the DeleteClusterSubnetGroup operation for the request builder.
  operation <- new_operation(
    name = "DeleteClusterSubnetGroup",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .redshift$delete_cluster_subnet_group_input(
    ClusterSubnetGroupName = ClusterSubnetGroupName
  )
  response_shape <- .redshift$delete_cluster_subnet_group_output()
  # Build a client from the active configuration, then dispatch the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.redshift$operations$delete_cluster_subnet_group <- redshift_delete_cluster_subnet_group
#' Deletes an Amazon Redshift event notification subscription
#'
#' @description
#' Deletes an Amazon Redshift event notification subscription.
#'
#' @usage
#' redshift_delete_event_subscription(SubscriptionName)
#'
#' @param SubscriptionName [required] The name of the Amazon Redshift event notification subscription to be
#' deleted.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_event_subscription(
#' SubscriptionName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_event_subscription
redshift_delete_event_subscription <- function(SubscriptionName) {
  # Describe the DeleteEventSubscription operation for the request builder.
  operation <- new_operation(
    name = "DeleteEventSubscription",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .redshift$delete_event_subscription_input(
    SubscriptionName = SubscriptionName
  )
  response_shape <- .redshift$delete_event_subscription_output()
  # Build a client from the active configuration, then dispatch the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.redshift$operations$delete_event_subscription <- redshift_delete_event_subscription
#' Deletes the specified HSM client certificate
#'
#' @description
#' Deletes the specified HSM client certificate.
#'
#' @usage
#' redshift_delete_hsm_client_certificate(HsmClientCertificateIdentifier)
#'
#' @param HsmClientCertificateIdentifier [required] The identifier of the HSM client certificate to be deleted.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_hsm_client_certificate(
#' HsmClientCertificateIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_hsm_client_certificate
redshift_delete_hsm_client_certificate <- function(HsmClientCertificateIdentifier) {
  # Describe the DeleteHsmClientCertificate operation for the request builder.
  operation <- new_operation(
    name = "DeleteHsmClientCertificate",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .redshift$delete_hsm_client_certificate_input(
    HsmClientCertificateIdentifier = HsmClientCertificateIdentifier
  )
  response_shape <- .redshift$delete_hsm_client_certificate_output()
  # Build a client from the active configuration, then dispatch the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.redshift$operations$delete_hsm_client_certificate <- redshift_delete_hsm_client_certificate
#' Deletes the specified Amazon Redshift HSM configuration
#'
#' @description
#' Deletes the specified Amazon Redshift HSM configuration.
#'
#' @usage
#' redshift_delete_hsm_configuration(HsmConfigurationIdentifier)
#'
#' @param HsmConfigurationIdentifier [required] The identifier of the Amazon Redshift HSM configuration to be deleted.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_hsm_configuration(
#' HsmConfigurationIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_hsm_configuration
redshift_delete_hsm_configuration <- function(HsmConfigurationIdentifier) {
  # Describe the wire-level Redshift query-API operation.
  operation <- new_operation(
    name = "DeleteHsmConfiguration",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments into the request shape; prepare the (empty)
  # output shape used to unmarshal the response.
  serialized_input <- .redshift$delete_hsm_configuration_input(
    HsmConfigurationIdentifier = HsmConfigurationIdentifier
  )
  serialized_output <- .redshift$delete_hsm_configuration_output()
  client <- .redshift$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(client, operation, serialized_input, serialized_output))
}
.redshift$operations$delete_hsm_configuration <- redshift_delete_hsm_configuration
#' Deletes a scheduled action
#'
#' @description
#' Deletes a scheduled action.
#'
#' @usage
#' redshift_delete_scheduled_action(ScheduledActionName)
#'
#' @param ScheduledActionName [required] The name of the scheduled action to delete.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_scheduled_action(
#' ScheduledActionName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_scheduled_action
redshift_delete_scheduled_action <- function(ScheduledActionName) {
  # Describe the wire-level Redshift query-API operation.
  operation <- new_operation(
    name = "DeleteScheduledAction",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments into the request shape; prepare the (empty)
  # output shape used to unmarshal the response.
  serialized_input <- .redshift$delete_scheduled_action_input(
    ScheduledActionName = ScheduledActionName
  )
  serialized_output <- .redshift$delete_scheduled_action_output()
  client <- .redshift$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(client, operation, serialized_input, serialized_output))
}
.redshift$operations$delete_scheduled_action <- redshift_delete_scheduled_action
#' Deletes the specified snapshot copy grant
#'
#' @description
#' Deletes the specified snapshot copy grant.
#'
#' @usage
#' redshift_delete_snapshot_copy_grant(SnapshotCopyGrantName)
#'
#' @param SnapshotCopyGrantName [required] The name of the snapshot copy grant to delete.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_snapshot_copy_grant(
#' SnapshotCopyGrantName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_snapshot_copy_grant
redshift_delete_snapshot_copy_grant <- function(SnapshotCopyGrantName) {
  # Describe the wire-level Redshift query-API operation.
  operation <- new_operation(
    name = "DeleteSnapshotCopyGrant",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments into the request shape; prepare the (empty)
  # output shape used to unmarshal the response.
  serialized_input <- .redshift$delete_snapshot_copy_grant_input(
    SnapshotCopyGrantName = SnapshotCopyGrantName
  )
  serialized_output <- .redshift$delete_snapshot_copy_grant_output()
  client <- .redshift$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(client, operation, serialized_input, serialized_output))
}
.redshift$operations$delete_snapshot_copy_grant <- redshift_delete_snapshot_copy_grant
#' Deletes a snapshot schedule
#'
#' @description
#' Deletes a snapshot schedule.
#'
#' @usage
#' redshift_delete_snapshot_schedule(ScheduleIdentifier)
#'
#' @param ScheduleIdentifier [required] A unique identifier of the snapshot schedule to delete.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_snapshot_schedule(
#' ScheduleIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_snapshot_schedule
redshift_delete_snapshot_schedule <- function(ScheduleIdentifier) {
  # Describe the wire-level Redshift query-API operation.
  operation <- new_operation(
    name = "DeleteSnapshotSchedule",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments into the request shape; prepare the (empty)
  # output shape used to unmarshal the response.
  serialized_input <- .redshift$delete_snapshot_schedule_input(
    ScheduleIdentifier = ScheduleIdentifier
  )
  serialized_output <- .redshift$delete_snapshot_schedule_output()
  client <- .redshift$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(client, operation, serialized_input, serialized_output))
}
.redshift$operations$delete_snapshot_schedule <- redshift_delete_snapshot_schedule
#' Deletes tags from a resource
#'
#' @description
#' Deletes tags from a resource. You must provide the ARN of the resource
#' from which you want to delete the tag or tags.
#'
#' @usage
#' redshift_delete_tags(ResourceName, TagKeys)
#'
#' @param ResourceName [required] The Amazon Resource Name (ARN) from which you want to remove the tag or
#' tags. For example, `arn:aws:redshift:us-east-2:123456789:cluster:t1`.
#' @param TagKeys [required] The tag key that you want to delete.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_tags(
#' ResourceName = "string",
#' TagKeys = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_tags
redshift_delete_tags <- function(ResourceName, TagKeys) {
  # Describe the wire-level Redshift query-API operation.
  operation <- new_operation(
    name = "DeleteTags",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments into the request shape; prepare the (empty)
  # output shape used to unmarshal the response.
  serialized_input <- .redshift$delete_tags_input(
    ResourceName = ResourceName,
    TagKeys = TagKeys
  )
  serialized_output <- .redshift$delete_tags_output()
  client <- .redshift$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(client, operation, serialized_input, serialized_output))
}
.redshift$operations$delete_tags <- redshift_delete_tags
#' Deletes a usage limit from a cluster
#'
#' @description
#' Deletes a usage limit from a cluster.
#'
#' @usage
#' redshift_delete_usage_limit(UsageLimitId)
#'
#' @param UsageLimitId [required] The identifier of the usage limit to delete.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_usage_limit(
#' UsageLimitId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_usage_limit
redshift_delete_usage_limit <- function(UsageLimitId) {
  # Describe the wire-level Redshift query-API operation.
  operation <- new_operation(
    name = "DeleteUsageLimit",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments into the request shape; prepare the (empty)
  # output shape used to unmarshal the response.
  serialized_input <- .redshift$delete_usage_limit_input(
    UsageLimitId = UsageLimitId
  )
  serialized_output <- .redshift$delete_usage_limit_output()
  client <- .redshift$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(client, operation, serialized_input, serialized_output))
}
.redshift$operations$delete_usage_limit <- redshift_delete_usage_limit
#' Returns a list of attributes attached to an account
#'
#' @description
#' Returns a list of attributes attached to an account
#'
#' @usage
#' redshift_describe_account_attributes(AttributeNames)
#'
#' @param AttributeNames A list of attribute names.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' AccountAttributes = list(
#' list(
#' AttributeName = "string",
#' AttributeValues = list(
#' list(
#' AttributeValue = "string"
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_account_attributes(
#' AttributeNames = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_account_attributes
redshift_describe_account_attributes <- function(AttributeNames = NULL) {
  # Describe the wire-level Redshift query-API operation.
  operation <- new_operation(
    name = "DescribeAccountAttributes",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments into the request shape; prepare the (empty)
  # output shape used to unmarshal the response.
  serialized_input <- .redshift$describe_account_attributes_input(
    AttributeNames = AttributeNames
  )
  serialized_output <- .redshift$describe_account_attributes_output()
  client <- .redshift$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(client, operation, serialized_input, serialized_output))
}
.redshift$operations$describe_account_attributes <- redshift_describe_account_attributes
#' Returns an array of ClusterDbRevision objects
#'
#' @description
#' Returns an array of `ClusterDbRevision` objects.
#'
#' @usage
#' redshift_describe_cluster_db_revisions(ClusterIdentifier, MaxRecords,
#' Marker)
#'
#' @param ClusterIdentifier A unique identifier for a cluster whose `ClusterDbRevisions` you are
#' requesting. This parameter is case sensitive. All clusters defined for
#' an account are returned by default.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified MaxRecords
#' value, a value is returned in the `marker` field of the response. You
#' can retrieve the next set of response records by providing the returned
#' `marker` value in the `marker` parameter and retrying the request.
#'
#' Default: 100
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point for returning a
#' set of response records. When the results of a
#' [`describe_cluster_db_revisions`][redshift_describe_cluster_db_revisions]
#' request exceed the value specified in `MaxRecords`, Amazon Redshift
#' returns a value in the `marker` field of the response. You can retrieve
#' the next set of response records by providing the returned `marker`
#' value in the `marker` parameter and retrying the request.
#'
#' Constraints: You can specify either the `ClusterIdentifier` parameter,
#' or the `marker` parameter, but not both.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' ClusterDbRevisions = list(
#' list(
#' ClusterIdentifier = "string",
#' CurrentDatabaseRevision = "string",
#' DatabaseRevisionReleaseDate = as.POSIXct(
#' "2015-01-01"
#' ),
#' RevisionTargets = list(
#' list(
#' DatabaseRevision = "string",
#' Description = "string",
#' DatabaseRevisionReleaseDate = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_cluster_db_revisions(
#' ClusterIdentifier = "string",
#' MaxRecords = 123,
#' Marker = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_cluster_db_revisions
redshift_describe_cluster_db_revisions <- function(ClusterIdentifier = NULL, MaxRecords = NULL, Marker = NULL) {
  # Describe the wire-level Redshift query-API operation.
  operation <- new_operation(
    name = "DescribeClusterDbRevisions",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments into the request shape; prepare the (empty)
  # output shape used to unmarshal the response.
  serialized_input <- .redshift$describe_cluster_db_revisions_input(
    ClusterIdentifier = ClusterIdentifier,
    MaxRecords = MaxRecords,
    Marker = Marker
  )
  serialized_output <- .redshift$describe_cluster_db_revisions_output()
  client <- .redshift$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(client, operation, serialized_input, serialized_output))
}
.redshift$operations$describe_cluster_db_revisions <- redshift_describe_cluster_db_revisions
#' Returns a list of Amazon Redshift parameter groups, including parameter
#' groups you created and the default parameter group
#'
#' @description
#' Returns a list of Amazon Redshift parameter groups, including parameter
#' groups you created and the default parameter group. For each parameter
#' group, the response includes the parameter group name, description, and
#' parameter group family name. You can optionally specify a name to
#' retrieve the description of a specific parameter group.
#'
#' For more information about parameters and parameter groups, go to
#' [Amazon Redshift Parameter
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' If you specify both tag keys and tag values in the same request, Amazon
#' Redshift returns all parameter groups that match any combination of the
#' specified keys and values. For example, if you have `owner` and
#' `environment` for tag keys, and `admin` and `test` for tag values, all
#' parameter groups that have any combination of those values are returned.
#'
#' If both tag keys and values are omitted from the request, parameter
#' groups are returned regardless of whether they have tag keys or values
#' associated with them.
#'
#' @usage
#' redshift_describe_cluster_parameter_groups(ParameterGroupName,
#' MaxRecords, Marker, TagKeys, TagValues)
#'
#' @param ParameterGroupName The name of a specific parameter group for which to return details. By
#' default, details about all parameter groups and the default parameter
#' group are returned.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_cluster_parameter_groups`][redshift_describe_cluster_parameter_groups]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#' @param TagKeys A tag key or keys for which you want to return all matching cluster
#' parameter groups that are associated with the specified key or keys. For
#' example, suppose that you have parameter groups that are tagged with
#' keys called `owner` and `environment`. If you specify both of these tag
#' keys in the request, Amazon Redshift returns a response with the
#' parameter groups that have either or both of these tag keys associated
#' with them.
#' @param TagValues A tag value or values for which you want to return all matching cluster
#' parameter groups that are associated with the specified tag value or
#' values. For example, suppose that you have parameter groups that are
#' tagged with values called `admin` and `test`. If you specify both of
#' these tag values in the request, Amazon Redshift returns a response with
#' the parameter groups that have either or both of these tag values
#' associated with them.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' ParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterGroupFamily = "string",
#' Description = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_cluster_parameter_groups(
#' ParameterGroupName = "string",
#' MaxRecords = 123,
#' Marker = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_cluster_parameter_groups
redshift_describe_cluster_parameter_groups <- function(ParameterGroupName = NULL, MaxRecords = NULL, Marker = NULL, TagKeys = NULL, TagValues = NULL) {
  # Describe the wire-level Redshift query-API operation.
  operation <- new_operation(
    name = "DescribeClusterParameterGroups",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments into the request shape; prepare the (empty)
  # output shape used to unmarshal the response.
  serialized_input <- .redshift$describe_cluster_parameter_groups_input(
    ParameterGroupName = ParameterGroupName,
    MaxRecords = MaxRecords,
    Marker = Marker,
    TagKeys = TagKeys,
    TagValues = TagValues
  )
  serialized_output <- .redshift$describe_cluster_parameter_groups_output()
  client <- .redshift$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(client, operation, serialized_input, serialized_output))
}
.redshift$operations$describe_cluster_parameter_groups <- redshift_describe_cluster_parameter_groups
#' Returns a detailed list of parameters contained within the specified
#' Amazon Redshift parameter group
#'
#' @description
#' Returns a detailed list of parameters contained within the specified
#' Amazon Redshift parameter group. For each parameter the response
#' includes information such as parameter name, description, data type,
#' value, whether the parameter value is modifiable, and so on.
#'
#' You can specify *source* filter to retrieve parameters of only specific
#' type. For example, to retrieve parameters that were modified by a user
#' action such as from
#' [`modify_cluster_parameter_group`][redshift_modify_cluster_parameter_group],
#' you can specify *source* equal to *user*.
#'
#' For more information about parameters and parameter groups, go to
#' [Amazon Redshift Parameter
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_describe_cluster_parameters(ParameterGroupName, Source,
#' MaxRecords, Marker)
#'
#' @param ParameterGroupName [required] The name of a cluster parameter group for which to return details.
#' @param Source The parameter types to return. Specify `user` to show parameters that
#' are different from the default. Similarly, specify `engine-default` to
#' show parameters that are the same as the default parameter group.
#'
#' Default: All parameter types returned.
#'
#' Valid Values: `user` | `engine-default`
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_cluster_parameters`][redshift_describe_cluster_parameters]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Parameters = list(
#' list(
#' ParameterName = "string",
#' ParameterValue = "string",
#' Description = "string",
#' Source = "string",
#' DataType = "string",
#' AllowedValues = "string",
#' ApplyType = "static"|"dynamic",
#' IsModifiable = TRUE|FALSE,
#' MinimumEngineVersion = "string"
#' )
#' ),
#' Marker = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_cluster_parameters(
#' ParameterGroupName = "string",
#' Source = "string",
#' MaxRecords = 123,
#' Marker = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_cluster_parameters
redshift_describe_cluster_parameters <- function(ParameterGroupName, Source = NULL, MaxRecords = NULL, Marker = NULL) {
  # Describe the wire-level Redshift query-API operation.
  operation <- new_operation(
    name = "DescribeClusterParameters",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments into the request shape; prepare the (empty)
  # output shape used to unmarshal the response.
  serialized_input <- .redshift$describe_cluster_parameters_input(
    ParameterGroupName = ParameterGroupName,
    Source = Source,
    MaxRecords = MaxRecords,
    Marker = Marker
  )
  serialized_output <- .redshift$describe_cluster_parameters_output()
  client <- .redshift$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(client, operation, serialized_input, serialized_output))
}
.redshift$operations$describe_cluster_parameters <- redshift_describe_cluster_parameters
#' Returns information about Amazon Redshift security groups
#'
#' @description
#' Returns information about Amazon Redshift security groups. If the name
#' of a security group is specified, the response will contain
#' information about only that security group.
#'
#' For information about managing security groups, go to [Amazon Redshift
#' Cluster Security
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' If you specify both tag keys and tag values in the same request, Amazon
#' Redshift returns all security groups that match any combination of the
#' specified keys and values. For example, if you have `owner` and
#' `environment` for tag keys, and `admin` and `test` for tag values, all
#' security groups that have any combination of those values are returned.
#'
#' If both tag keys and values are omitted from the request, security
#' groups are returned regardless of whether they have tag keys or values
#' associated with them.
#'
#' @usage
#' redshift_describe_cluster_security_groups(ClusterSecurityGroupName,
#' MaxRecords, Marker, TagKeys, TagValues)
#'
#' @param ClusterSecurityGroupName The name of a cluster security group for which you are requesting
#' details. You can specify either the **Marker** parameter or a
#' **ClusterSecurityGroupName** parameter, but not both.
#'
#' Example: `securitygroup1`
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_cluster_security_groups`][redshift_describe_cluster_security_groups]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#'
#' Constraints: You can specify either the **ClusterSecurityGroupName**
#' parameter or the **Marker** parameter, but not both.
#' @param TagKeys A tag key or keys for which you want to return all matching cluster
#' security groups that are associated with the specified key or keys. For
#' example, suppose that you have security groups that are tagged with keys
#' called `owner` and `environment`. If you specify both of these tag keys
#' in the request, Amazon Redshift returns a response with the security
#' groups that have either or both of these tag keys associated with them.
#' @param TagValues A tag value or values for which you want to return all matching cluster
#' security groups that are associated with the specified tag value or
#' values. For example, suppose that you have security groups that are
#' tagged with values called `admin` and `test`. If you specify both of
#' these tag values in the request, Amazon Redshift returns a response with
#' the security groups that have either or both of these tag values
#' associated with them.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Description = "string",
#' EC2SecurityGroups = list(
#' list(
#' Status = "string",
#' EC2SecurityGroupName = "string",
#' EC2SecurityGroupOwnerId = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ),
#' IPRanges = list(
#' list(
#' Status = "string",
#' CIDRIP = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ),
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_cluster_security_groups(
#' ClusterSecurityGroupName = "string",
#' MaxRecords = 123,
#' Marker = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_cluster_security_groups
redshift_describe_cluster_security_groups <- function(ClusterSecurityGroupName = NULL, MaxRecords = NULL, Marker = NULL, TagKeys = NULL, TagValues = NULL) {
  # Describe the wire-level Redshift query-API operation.
  operation <- new_operation(
    name = "DescribeClusterSecurityGroups",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments into the request shape; prepare the (empty)
  # output shape used to unmarshal the response.
  serialized_input <- .redshift$describe_cluster_security_groups_input(
    ClusterSecurityGroupName = ClusterSecurityGroupName,
    MaxRecords = MaxRecords,
    Marker = Marker,
    TagKeys = TagKeys,
    TagValues = TagValues
  )
  serialized_output <- .redshift$describe_cluster_security_groups_output()
  client <- .redshift$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(client, operation, serialized_input, serialized_output))
}
.redshift$operations$describe_cluster_security_groups <- redshift_describe_cluster_security_groups
#' Returns one or more snapshot objects, which contain metadata about your
#' cluster snapshots
#'
#' @description
#' Returns one or more snapshot objects, which contain metadata about your
#' cluster snapshots. By default, this operation returns information about
#' all snapshots of all clusters that are owned by your AWS customer
#' account. No information is returned for snapshots owned by inactive AWS
#' customer accounts.
#'
#' If you specify both tag keys and tag values in the same request, Amazon
#' Redshift returns all snapshots that match any combination of the
#' specified keys and values. For example, if you have `owner` and
#' `environment` for tag keys, and `admin` and `test` for tag values, all
#' snapshots that have any combination of those values are returned. Only
#' snapshots that you own are returned in the response; shared snapshots
#' are not returned with the tag key and tag value request parameters.
#'
#' If both tag keys and values are omitted from the request, snapshots are
#' returned regardless of whether they have tag keys or values associated
#' with them.
#'
#' @usage
#' redshift_describe_cluster_snapshots(ClusterIdentifier,
#' SnapshotIdentifier, SnapshotType, StartTime, EndTime, MaxRecords,
#' Marker, OwnerAccount, TagKeys, TagValues, ClusterExists,
#' SortingEntities)
#'
#' @param ClusterIdentifier The identifier of the cluster which generated the requested snapshots.
#' @param SnapshotIdentifier The snapshot identifier of the snapshot about which to return
#' information.
#' @param SnapshotType The type of snapshots for which you are requesting information. By
#' default, snapshots of all types are returned.
#'
#' Valid Values: `automated` | `manual`
#' @param StartTime A value that requests only snapshots created at or after the specified
#' time. The time value is specified in ISO 8601 format. For more
#' information about ISO 8601, go to the [ISO8601 Wikipedia
#' page.](https://en.wikipedia.org/wiki/ISO_8601)
#'
#' Example: `2012-07-16T18:00:00Z`
#' @param EndTime A time value that requests only snapshots created at or before the
#' specified time. The time value is specified in ISO 8601 format. For more
#' information about ISO 8601, go to the [ISO8601 Wikipedia
#' page.](https://en.wikipedia.org/wiki/ISO_8601)
#'
#' Example: `2012-07-16T18:00:00Z`
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_cluster_snapshots`][redshift_describe_cluster_snapshots]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#' @param OwnerAccount The AWS customer account used to create or copy the snapshot. Use this
#' field to filter the results to snapshots owned by a particular account.
#' To describe snapshots you own, either specify your AWS customer account,
#' or do not specify the parameter.
#' @param TagKeys A tag key or keys for which you want to return all matching cluster
#' snapshots that are associated with the specified key or keys. For
#' example, suppose that you have snapshots that are tagged with keys
#' called `owner` and `environment`. If you specify both of these tag keys
#' in the request, Amazon Redshift returns a response with the snapshots
#' that have either or both of these tag keys associated with them.
#' @param TagValues A tag value or values for which you want to return all matching cluster
#' snapshots that are associated with the specified tag value or values.
#' For example, suppose that you have snapshots that are tagged with values
#' called `admin` and `test`. If you specify both of these tag values in
#' the request, Amazon Redshift returns a response with the snapshots that
#' have either or both of these tag values associated with them.
#' @param ClusterExists A value that indicates whether to return snapshots only for an existing
#' cluster. You can perform table-level restore only by using a snapshot of
#' an existing cluster, that is, a cluster that has not been deleted.
#' Values for this parameter work as follows:
#'
#' - If `ClusterExists` is set to `true`, `ClusterIdentifier` is
#' required.
#'
#' - If `ClusterExists` is set to `false` and `ClusterIdentifier` isn't
#' specified, all snapshots associated with deleted clusters (orphaned
#' snapshots) are returned.
#'
#' - If `ClusterExists` is set to `false` and `ClusterIdentifier` is
#' specified for a deleted cluster, snapshots associated with that
#' cluster are returned.
#'
#' - If `ClusterExists` is set to `false` and `ClusterIdentifier` is
#' specified for an existing cluster, no snapshots are returned.
#' @param SortingEntities
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' Snapshots = list(
#' list(
#' SnapshotIdentifier = "string",
#' ClusterIdentifier = "string",
#' SnapshotCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Status = "string",
#' Port = 123,
#' AvailabilityZone = "string",
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' MasterUsername = "string",
#' ClusterVersion = "string",
#' EngineFullVersion = "string",
#' SnapshotType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' DBName = "string",
#' VpcId = "string",
#' Encrypted = TRUE|FALSE,
#' KmsKeyId = "string",
#' EncryptedWithHSM = TRUE|FALSE,
#' AccountsWithRestoreAccess = list(
#' list(
#' AccountId = "string",
#' AccountAlias = "string"
#' )
#' ),
#' OwnerAccount = "string",
#' TotalBackupSizeInMegaBytes = 123.0,
#' ActualIncrementalBackupSizeInMegaBytes = 123.0,
#' BackupProgressInMegaBytes = 123.0,
#' CurrentBackupRateInMegaBytesPerSecond = 123.0,
#' EstimatedSecondsToCompletion = 123,
#' ElapsedTimeInSeconds = 123,
#' SourceRegion = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' RestorableNodeTypes = list(
#' "string"
#' ),
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' ManualSnapshotRetentionPeriod = 123,
#' ManualSnapshotRemainingDays = 123,
#' SnapshotRetentionStartTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_cluster_snapshots(
#' ClusterIdentifier = "string",
#' SnapshotIdentifier = "string",
#' SnapshotType = "string",
#' StartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' EndTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' MaxRecords = 123,
#' Marker = "string",
#' OwnerAccount = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' ),
#' ClusterExists = TRUE|FALSE,
#' SortingEntities = list(
#' list(
#' Attribute = "SOURCE_TYPE"|"TOTAL_SIZE"|"CREATE_TIME",
#' SortOrder = "ASC"|"DESC"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_cluster_snapshots
redshift_describe_cluster_snapshots <- function(ClusterIdentifier = NULL, SnapshotIdentifier = NULL, SnapshotType = NULL, StartTime = NULL, EndTime = NULL, MaxRecords = NULL, Marker = NULL, OwnerAccount = NULL, TagKeys = NULL, TagValues = NULL, ClusterExists = NULL, SortingEntities = NULL) {
  # Describe the wire-level Redshift query-API operation.
  operation <- new_operation(
    name = "DescribeClusterSnapshots",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments into the request shape; prepare the (empty)
  # output shape used to unmarshal the response.
  serialized_input <- .redshift$describe_cluster_snapshots_input(
    ClusterIdentifier = ClusterIdentifier,
    SnapshotIdentifier = SnapshotIdentifier,
    SnapshotType = SnapshotType,
    StartTime = StartTime,
    EndTime = EndTime,
    MaxRecords = MaxRecords,
    Marker = Marker,
    OwnerAccount = OwnerAccount,
    TagKeys = TagKeys,
    TagValues = TagValues,
    ClusterExists = ClusterExists,
    SortingEntities = SortingEntities
  )
  serialized_output <- .redshift$describe_cluster_snapshots_output()
  client <- .redshift$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(client, operation, serialized_input, serialized_output))
}
.redshift$operations$describe_cluster_snapshots <- redshift_describe_cluster_snapshots
#' Returns one or more cluster subnet group objects, which contain metadata
#' about your cluster subnet groups
#'
#' @description
#' Returns one or more cluster subnet group objects, which contain metadata
#' about your cluster subnet groups. By default, this operation returns
#' information about all cluster subnet groups that are defined in your AWS
#' account.
#'
#' If you specify both tag keys and tag values in the same request, Amazon
#' Redshift returns all subnet groups that match any combination of the
#' specified keys and values. For example, if you have `owner` and
#' `environment` for tag keys, and `admin` and `test` for tag values, all
#' subnet groups that have any combination of those values are returned.
#'
#' If both tag keys and values are omitted from the request, subnet groups
#' are returned regardless of whether they have tag keys or values
#' associated with them.
#'
#' @usage
#' redshift_describe_cluster_subnet_groups(ClusterSubnetGroupName,
#' MaxRecords, Marker, TagKeys, TagValues)
#'
#' @param ClusterSubnetGroupName The name of the cluster subnet group for which information is requested.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_cluster_subnet_groups`][redshift_describe_cluster_subnet_groups]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#' @param TagKeys A tag key or keys for which you want to return all matching cluster
#' subnet groups that are associated with the specified key or keys. For
#' example, suppose that you have subnet groups that are tagged with keys
#' called `owner` and `environment`. If you specify both of these tag keys
#' in the request, Amazon Redshift returns a response with the subnet
#' groups that have either or both of these tag keys associated with them.
#' @param TagValues A tag value or values for which you want to return all matching cluster
#' subnet groups that are associated with the specified tag value or
#' values. For example, suppose that you have subnet groups that are tagged
#' with values called `admin` and `test`. If you specify both of these tag
#' values in the request, Amazon Redshift returns a response with the
#' subnet groups that have either or both of these tag values associated
#' with them.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' ClusterSubnetGroups = list(
#' list(
#' ClusterSubnetGroupName = "string",
#' Description = "string",
#' VpcId = "string",
#' SubnetGroupStatus = "string",
#' Subnets = list(
#' list(
#' SubnetIdentifier = "string",
#' SubnetAvailabilityZone = list(
#' Name = "string",
#' SupportedPlatforms = list(
#' list(
#' Name = "string"
#' )
#' )
#' ),
#' SubnetStatus = "string"
#' )
#' ),
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_cluster_subnet_groups(
#' ClusterSubnetGroupName = "string",
#' MaxRecords = 123,
#' Marker = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_cluster_subnet_groups
redshift_describe_cluster_subnet_groups <- function(ClusterSubnetGroupName = NULL, MaxRecords = NULL, Marker = NULL, TagKeys = NULL, TagValues = NULL) {
  # Describe the wire-level call; all Redshift operations POST to "/".
  operation <- new_operation(
    name = "DescribeClusterSubnetGroups",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied filters into the request shape.
  request_input <- .redshift$describe_cluster_subnet_groups_input(
    ClusterSubnetGroupName = ClusterSubnetGroupName,
    MaxRecords = MaxRecords,
    Marker = Marker,
    TagKeys = TagKeys,
    TagValues = TagValues
  )
  request_output <- .redshift$describe_cluster_subnet_groups_output()
  service <- .redshift$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(service, operation, request_input, request_output))
}
.redshift$operations$describe_cluster_subnet_groups <- redshift_describe_cluster_subnet_groups
#' Returns a list of all the available maintenance tracks
#'
#' @description
#' Returns a list of all the available maintenance tracks.
#'
#' @usage
#' redshift_describe_cluster_tracks(MaintenanceTrackName, MaxRecords,
#' Marker)
#'
#' @param MaintenanceTrackName The name of the maintenance track.
#' @param MaxRecords An integer value for the maximum number of maintenance tracks to return.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_cluster_tracks`][redshift_describe_cluster_tracks] request
#' exceed the value specified in `MaxRecords`, Amazon Redshift returns a
#' value in the `Marker` field of the response. You can retrieve the next
#' set of response records by providing the returned marker value in the
#' `Marker` parameter and retrying the request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' MaintenanceTracks = list(
#' list(
#' MaintenanceTrackName = "string",
#' DatabaseVersion = "string",
#' UpdateTargets = list(
#' list(
#' MaintenanceTrackName = "string",
#' DatabaseVersion = "string",
#' SupportedOperations = list(
#' list(
#' OperationName = "string"
#' )
#' )
#' )
#' )
#' )
#' ),
#' Marker = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_cluster_tracks(
#' MaintenanceTrackName = "string",
#' MaxRecords = 123,
#' Marker = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_cluster_tracks
redshift_describe_cluster_tracks <- function(MaintenanceTrackName = NULL, MaxRecords = NULL, Marker = NULL) {
  # Describe the wire-level call; all Redshift operations POST to "/".
  operation <- new_operation(
    name = "DescribeClusterTracks",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied filters into the request shape.
  request_input <- .redshift$describe_cluster_tracks_input(
    MaintenanceTrackName = MaintenanceTrackName,
    MaxRecords = MaxRecords,
    Marker = Marker
  )
  request_output <- .redshift$describe_cluster_tracks_output()
  service <- .redshift$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(service, operation, request_input, request_output))
}
.redshift$operations$describe_cluster_tracks <- redshift_describe_cluster_tracks
#' Returns descriptions of the available Amazon Redshift cluster versions
#'
#' @description
#' Returns descriptions of the available Amazon Redshift cluster versions.
#' You can call this operation even before creating any clusters to learn
#' more about the Amazon Redshift versions. For more information about
#' managing clusters, go to [Amazon Redshift
#' Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_describe_cluster_versions(ClusterVersion,
#' ClusterParameterGroupFamily, MaxRecords, Marker)
#'
#' @param ClusterVersion The specific cluster version to return.
#'
#' Example: `1.0`
#' @param ClusterParameterGroupFamily The name of a specific cluster parameter group family to return details
#' for.
#'
#' Constraints:
#'
#' - Must be 1 to 255 alphanumeric characters
#'
#' - First character must be a letter
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_cluster_versions`][redshift_describe_cluster_versions]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' ClusterVersions = list(
#' list(
#' ClusterVersion = "string",
#' ClusterParameterGroupFamily = "string",
#' Description = "string"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_cluster_versions(
#' ClusterVersion = "string",
#' ClusterParameterGroupFamily = "string",
#' MaxRecords = 123,
#' Marker = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_cluster_versions
redshift_describe_cluster_versions <- function(ClusterVersion = NULL, ClusterParameterGroupFamily = NULL, MaxRecords = NULL, Marker = NULL) {
  # Describe the wire-level call; all Redshift operations POST to "/".
  operation <- new_operation(
    name = "DescribeClusterVersions",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied filters into the request shape.
  request_input <- .redshift$describe_cluster_versions_input(
    ClusterVersion = ClusterVersion,
    ClusterParameterGroupFamily = ClusterParameterGroupFamily,
    MaxRecords = MaxRecords,
    Marker = Marker
  )
  request_output <- .redshift$describe_cluster_versions_output()
  service <- .redshift$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(service, operation, request_input, request_output))
}
.redshift$operations$describe_cluster_versions <- redshift_describe_cluster_versions
#' Returns properties of provisioned clusters including general cluster
#' properties, cluster database properties, maintenance and backup
#' properties, and security and access properties
#'
#' @description
#' Returns properties of provisioned clusters including general cluster
#' properties, cluster database properties, maintenance and backup
#' properties, and security and access properties. This operation supports
#' pagination. For more information about managing clusters, go to [Amazon
#' Redshift
#' Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' If you specify both tag keys and tag values in the same request, Amazon
#' Redshift returns all clusters that match any combination of the
#' specified keys and values. For example, if you have `owner` and
#' `environment` for tag keys, and `admin` and `test` for tag values, all
#' clusters that have any combination of those values are returned.
#'
#' If both tag keys and values are omitted from the request, clusters are
#' returned regardless of whether they have tag keys or values associated
#' with them.
#'
#' @usage
#' redshift_describe_clusters(ClusterIdentifier, MaxRecords, Marker,
#' TagKeys, TagValues)
#'
#' @param ClusterIdentifier The unique identifier of a cluster whose properties you are requesting.
#' This parameter is case sensitive.
#'
#' The default is that all clusters defined for an account are returned.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_clusters`][redshift_describe_clusters] request exceed the
#' value specified in `MaxRecords`, AWS returns a value in the `Marker`
#' field of the response. You can retrieve the next set of response records
#' by providing the returned marker value in the `Marker` parameter and
#' retrying the request.
#'
#' Constraints: You can specify either the **ClusterIdentifier** parameter
#' or the **Marker** parameter, but not both.
#' @param TagKeys A tag key or keys for which you want to return all matching clusters
#' that are associated with the specified key or keys. For example, suppose
#' that you have clusters that are tagged with keys called `owner` and
#' `environment`. If you specify both of these tag keys in the request,
#' Amazon Redshift returns a response with the clusters that have either or
#' both of these tag keys associated with them.
#' @param TagValues A tag value or values for which you want to return all matching clusters
#' that are associated with the specified tag value or values. For example,
#' suppose that you have clusters that are tagged with values called
#' `admin` and `test`. If you specify both of these tag values in the
#' request, Amazon Redshift returns a response with the clusters that have
#' either or both of these tag values associated with them.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' Clusters = list(
#' list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_clusters(
#' ClusterIdentifier = "string",
#' MaxRecords = 123,
#' Marker = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_clusters
redshift_describe_clusters <- function(ClusterIdentifier = NULL, MaxRecords = NULL, Marker = NULL, TagKeys = NULL, TagValues = NULL) {
  # Describe the wire-level call; all Redshift operations POST to "/".
  operation <- new_operation(
    name = "DescribeClusters",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied filters into the request shape.
  request_input <- .redshift$describe_clusters_input(
    ClusterIdentifier = ClusterIdentifier,
    MaxRecords = MaxRecords,
    Marker = Marker,
    TagKeys = TagKeys,
    TagValues = TagValues
  )
  request_output <- .redshift$describe_clusters_output()
  service <- .redshift$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(service, operation, request_input, request_output))
}
.redshift$operations$describe_clusters <- redshift_describe_clusters
#' Returns a list of parameter settings for the specified parameter group
#' family
#'
#' @description
#' Returns a list of parameter settings for the specified parameter group
#' family.
#'
#' For more information about parameters and parameter groups, go to
#' [Amazon Redshift Parameter
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_describe_default_cluster_parameters(ParameterGroupFamily,
#' MaxRecords, Marker)
#'
#' @param ParameterGroupFamily [required] The name of the cluster parameter group family.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_default_cluster_parameters`][redshift_describe_default_cluster_parameters]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' DefaultClusterParameters = list(
#' ParameterGroupFamily = "string",
#' Marker = "string",
#' Parameters = list(
#' list(
#' ParameterName = "string",
#' ParameterValue = "string",
#' Description = "string",
#' Source = "string",
#' DataType = "string",
#' AllowedValues = "string",
#' ApplyType = "static"|"dynamic",
#' IsModifiable = TRUE|FALSE,
#' MinimumEngineVersion = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_default_cluster_parameters(
#' ParameterGroupFamily = "string",
#' MaxRecords = 123,
#' Marker = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_default_cluster_parameters
redshift_describe_default_cluster_parameters <- function(ParameterGroupFamily, MaxRecords = NULL, Marker = NULL) {
  # Describe the wire-level call; all Redshift operations POST to "/".
  operation <- new_operation(
    name = "DescribeDefaultClusterParameters",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied arguments into the request shape.
  # ParameterGroupFamily is required; the input builder enforces it.
  request_input <- .redshift$describe_default_cluster_parameters_input(
    ParameterGroupFamily = ParameterGroupFamily,
    MaxRecords = MaxRecords,
    Marker = Marker
  )
  request_output <- .redshift$describe_default_cluster_parameters_output()
  service <- .redshift$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(service, operation, request_input, request_output))
}
.redshift$operations$describe_default_cluster_parameters <- redshift_describe_default_cluster_parameters
#' Displays a list of event categories for all event source types, or for a
#' specified source type
#'
#' @description
#' Displays a list of event categories for all event source types, or for a
#' specified source type. For a list of the event categories and source
#' types, go to [Amazon Redshift Event
#' Notifications](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-event-notifications.html).
#'
#' @usage
#' redshift_describe_event_categories(SourceType)
#'
#' @param SourceType The source type, such as cluster or parameter group, to which the
#' described event categories apply.
#'
#' Valid values: cluster, cluster-snapshot, cluster-parameter-group,
#' cluster-security-group, and scheduled-action.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' EventCategoriesMapList = list(
#' list(
#' SourceType = "string",
#' Events = list(
#' list(
#' EventId = "string",
#' EventCategories = list(
#' "string"
#' ),
#' EventDescription = "string",
#' Severity = "string"
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_event_categories(
#' SourceType = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_event_categories
redshift_describe_event_categories <- function(SourceType = NULL) {
  # Describe the wire-level call; all Redshift operations POST to "/".
  operation <- new_operation(
    name = "DescribeEventCategories",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the optional source-type filter into the request shape.
  request_input <- .redshift$describe_event_categories_input(
    SourceType = SourceType
  )
  request_output <- .redshift$describe_event_categories_output()
  service <- .redshift$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(service, operation, request_input, request_output))
}
.redshift$operations$describe_event_categories <- redshift_describe_event_categories
#' Lists descriptions of all the Amazon Redshift event notification
#' subscriptions for a customer account
#'
#' @description
#' Lists descriptions of all the Amazon Redshift event notification
#' subscriptions for a customer account. If you specify a subscription
#' name, lists the description for that subscription.
#'
#' If you specify both tag keys and tag values in the same request, Amazon
#' Redshift returns all event notification subscriptions that match any
#' combination of the specified keys and values. For example, if you have
#' `owner` and `environment` for tag keys, and `admin` and `test` for tag
#' values, all subscriptions that have any combination of those values are
#' returned.
#'
#' If both tag keys and values are omitted from the request, subscriptions
#' are returned regardless of whether they have tag keys or values
#' associated with them.
#'
#' @usage
#' redshift_describe_event_subscriptions(SubscriptionName, MaxRecords,
#' Marker, TagKeys, TagValues)
#'
#' @param SubscriptionName The name of the Amazon Redshift event notification subscription to be
#' described.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a DescribeEventSubscriptions
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#' @param TagKeys A tag key or keys for which you want to return all matching event
#' notification subscriptions that are associated with the specified key or
#' keys. For example, suppose that you have subscriptions that are tagged
#' with keys called `owner` and `environment`. If you specify both of these
#' tag keys in the request, Amazon Redshift returns a response with the
#' subscriptions that have either or both of these tag keys associated with
#' them.
#' @param TagValues A tag value or values for which you want to return all matching event
#' notification subscriptions that are associated with the specified tag
#' value or values. For example, suppose that you have subscriptions that
#' are tagged with values called `admin` and `test`. If you specify both of
#' these tag values in the request, Amazon Redshift returns a response with
#' the subscriptions that have either or both of these tag values
#' associated with them.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' EventSubscriptionsList = list(
#' list(
#' CustomerAwsId = "string",
#' CustSubscriptionId = "string",
#' SnsTopicArn = "string",
#' Status = "string",
#' SubscriptionCreationTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' SourceType = "string",
#' SourceIdsList = list(
#' "string"
#' ),
#' EventCategoriesList = list(
#' "string"
#' ),
#' Severity = "string",
#' Enabled = TRUE|FALSE,
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_event_subscriptions(
#' SubscriptionName = "string",
#' MaxRecords = 123,
#' Marker = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_event_subscriptions
redshift_describe_event_subscriptions <- function(SubscriptionName = NULL, MaxRecords = NULL, Marker = NULL, TagKeys = NULL, TagValues = NULL) {
  # Describe the wire-level call; all Redshift operations POST to "/".
  operation <- new_operation(
    name = "DescribeEventSubscriptions",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied filters into the request shape.
  request_input <- .redshift$describe_event_subscriptions_input(
    SubscriptionName = SubscriptionName,
    MaxRecords = MaxRecords,
    Marker = Marker,
    TagKeys = TagKeys,
    TagValues = TagValues
  )
  request_output <- .redshift$describe_event_subscriptions_output()
  service <- .redshift$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(service, operation, request_input, request_output))
}
.redshift$operations$describe_event_subscriptions <- redshift_describe_event_subscriptions
#' Returns events related to clusters, security groups, snapshots, and
#' parameter groups for the past 14 days
#'
#' @description
#' Returns events related to clusters, security groups, snapshots, and
#' parameter groups for the past 14 days. Events specific to a particular
#' cluster, security group, snapshot or parameter group can be obtained by
#' providing the name as a parameter. By default, the past hour of events
#' are returned.
#'
#' @usage
#' redshift_describe_events(SourceIdentifier, SourceType, StartTime,
#' EndTime, Duration, MaxRecords, Marker)
#'
#' @param SourceIdentifier The identifier of the event source for which events will be returned. If
#' this parameter is not specified, then all sources are included in the
#' response.
#'
#' Constraints:
#'
#' If *SourceIdentifier* is supplied, *SourceType* must also be provided.
#'
#' - Specify a cluster identifier when *SourceType* is `cluster`.
#'
#' - Specify a cluster security group name when *SourceType* is
#' `cluster-security-group`.
#'
#' - Specify a cluster parameter group name when *SourceType* is
#' `cluster-parameter-group`.
#'
#' - Specify a cluster snapshot identifier when *SourceType* is
#' `cluster-snapshot`.
#' @param SourceType The event source to retrieve events for. If no value is specified, all
#' events are returned.
#'
#' Constraints:
#'
#' If *SourceType* is supplied, *SourceIdentifier* must also be provided.
#'
#' - Specify `cluster` when *SourceIdentifier* is a cluster identifier.
#'
#' - Specify `cluster-security-group` when *SourceIdentifier* is a
#' cluster security group name.
#'
#' - Specify `cluster-parameter-group` when *SourceIdentifier* is a
#' cluster parameter group name.
#'
#' - Specify `cluster-snapshot` when *SourceIdentifier* is a cluster
#' snapshot identifier.
#' @param StartTime The beginning of the time interval to retrieve events for, specified in
#' ISO 8601 format. For more information about ISO 8601, go to the [ISO8601
#' Wikipedia page.](https://en.wikipedia.org/wiki/ISO_8601)
#'
#' Example: `2009-07-08T18:00Z`
#' @param EndTime The end of the time interval for which to retrieve events, specified in
#' ISO 8601 format. For more information about ISO 8601, go to the [ISO8601
#' Wikipedia page.](https://en.wikipedia.org/wiki/ISO_8601)
#'
#' Example: `2009-07-08T18:00Z`
#' @param Duration The number of minutes prior to the time of the request for which to
#' retrieve events. For example, if the request is sent at 18:00 and you
#' specify a duration of 60, then only events which have occurred after
#' 17:00 will be returned.
#'
#' Default: `60`
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_events`][redshift_describe_events] request exceed the value
#' specified in `MaxRecords`, AWS returns a value in the `Marker` field of
#' the response. You can retrieve the next set of response records by
#' providing the returned marker value in the `Marker` parameter and
#' retrying the request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' Events = list(
#' list(
#' SourceIdentifier = "string",
#' SourceType = "cluster"|"cluster-parameter-group"|"cluster-security-group"|"cluster-snapshot"|"scheduled-action",
#' Message = "string",
#' EventCategories = list(
#' "string"
#' ),
#' Severity = "string",
#' Date = as.POSIXct(
#' "2015-01-01"
#' ),
#' EventId = "string"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_events(
#' SourceIdentifier = "string",
#' SourceType = "cluster"|"cluster-parameter-group"|"cluster-security-group"|"cluster-snapshot"|"scheduled-action",
#' StartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' EndTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Duration = 123,
#' MaxRecords = 123,
#' Marker = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_events
redshift_describe_events <- function(SourceIdentifier = NULL, SourceType = NULL, StartTime = NULL, EndTime = NULL, Duration = NULL, MaxRecords = NULL, Marker = NULL) {
  # Assemble the DescribeEvents operation, serialize the caller's inputs,
  # and dispatch the request to the Redshift endpoint.
  operation <- new_operation(
    name = "DescribeEvents",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_input <- .redshift$describe_events_input(
    SourceIdentifier = SourceIdentifier,
    SourceType = SourceType,
    StartTime = StartTime,
    EndTime = EndTime,
    Duration = Duration,
    MaxRecords = MaxRecords,
    Marker = Marker
  )
  output_shape <- .redshift$describe_events_output()
  service <- .redshift$service(get_config())
  send_request(new_request(service, operation, request_input, output_shape))
}
.redshift$operations$describe_events <- redshift_describe_events
#' Returns information about the specified HSM client certificate
#'
#' @description
#' Returns information about the specified HSM client certificate. If no
#' certificate ID is specified, returns information about all the HSM
#' certificates owned by your AWS customer account.
#'
#' If you specify both tag keys and tag values in the same request, Amazon
#' Redshift returns all HSM client certificates that match any combination
#' of the specified keys and values. For example, if you have `owner` and
#' `environment` for tag keys, and `admin` and `test` for tag values, all
#' HSM client certificates that have any combination of those values are
#' returned.
#'
#' If both tag keys and values are omitted from the request, HSM client
#' certificates are returned regardless of whether they have tag keys or
#' values associated with them.
#'
#' @usage
#' redshift_describe_hsm_client_certificates(
#' HsmClientCertificateIdentifier, MaxRecords, Marker, TagKeys, TagValues)
#'
#' @param HsmClientCertificateIdentifier The identifier of a specific HSM client certificate for which you want
#' information. If no identifier is specified, information is returned for
#' all HSM client certificates owned by your AWS customer account.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_hsm_client_certificates`][redshift_describe_hsm_client_certificates]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#' @param TagKeys A tag key or keys for which you want to return all matching HSM client
#' certificates that are associated with the specified key or keys. For
#' example, suppose that you have HSM client certificates that are tagged
#' with keys called `owner` and `environment`. If you specify both of these
#' tag keys in the request, Amazon Redshift returns a response with the HSM
#' client certificates that have either or both of these tag keys
#' associated with them.
#' @param TagValues A tag value or values for which you want to return all matching HSM
#' client certificates that are associated with the specified tag value or
#' values. For example, suppose that you have HSM client certificates that
#' are tagged with values called `admin` and `test`. If you specify both of
#' these tag values in the request, Amazon Redshift returns a response with
#' the HSM client certificates that have either or both of these tag values
#' associated with them.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' HsmClientCertificates = list(
#' list(
#' HsmClientCertificateIdentifier = "string",
#' HsmClientCertificatePublicKey = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_hsm_client_certificates(
#' HsmClientCertificateIdentifier = "string",
#' MaxRecords = 123,
#' Marker = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_hsm_client_certificates
redshift_describe_hsm_client_certificates <- function(HsmClientCertificateIdentifier = NULL, MaxRecords = NULL, Marker = NULL, TagKeys = NULL, TagValues = NULL) {
  # Build and send the DescribeHsmClientCertificates request.
  op_spec <- new_operation(
    name = "DescribeHsmClientCertificates",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .redshift$describe_hsm_client_certificates_input(
    HsmClientCertificateIdentifier = HsmClientCertificateIdentifier,
    MaxRecords = MaxRecords,
    Marker = Marker,
    TagKeys = TagKeys,
    TagValues = TagValues
  )
  shape <- .redshift$describe_hsm_client_certificates_output()
  client <- .redshift$service(get_config())
  req <- new_request(client, op_spec, payload, shape)
  send_request(req)
}
.redshift$operations$describe_hsm_client_certificates <- redshift_describe_hsm_client_certificates
#' Returns information about the specified Amazon Redshift HSM
#' configuration
#'
#' @description
#' Returns information about the specified Amazon Redshift HSM
#' configuration. If no configuration ID is specified, returns information
#' about all the HSM configurations owned by your AWS customer account.
#'
#' If you specify both tag keys and tag values in the same request, Amazon
#' Redshift returns all HSM connections that match any combination of the
#' specified keys and values. For example, if you have `owner` and
#' `environment` for tag keys, and `admin` and `test` for tag values, all
#' HSM connections that have any combination of those values are returned.
#'
#' If both tag keys and values are omitted from the request, HSM
#' connections are returned regardless of whether they have tag keys or
#' values associated with them.
#'
#' @usage
#' redshift_describe_hsm_configurations(HsmConfigurationIdentifier,
#' MaxRecords, Marker, TagKeys, TagValues)
#'
#' @param HsmConfigurationIdentifier The identifier of a specific Amazon Redshift HSM configuration to be
#' described. If no identifier is specified, information is returned for
#' all HSM configurations owned by your AWS customer account.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_hsm_configurations`][redshift_describe_hsm_configurations]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#' @param TagKeys A tag key or keys for which you want to return all matching HSM
#' configurations that are associated with the specified key or keys. For
#' example, suppose that you have HSM configurations that are tagged with
#' keys called `owner` and `environment`. If you specify both of these tag
#' keys in the request, Amazon Redshift returns a response with the HSM
#' configurations that have either or both of these tag keys associated
#' with them.
#' @param TagValues A tag value or values for which you want to return all matching HSM
#' configurations that are associated with the specified tag value or
#' values. For example, suppose that you have HSM configurations that are
#' tagged with values called `admin` and `test`. If you specify both of
#' these tag values in the request, Amazon Redshift returns a response with
#' the HSM configurations that have either or both of these tag values
#' associated with them.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' HsmConfigurations = list(
#' list(
#' HsmConfigurationIdentifier = "string",
#' Description = "string",
#' HsmIpAddress = "string",
#' HsmPartitionName = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_hsm_configurations(
#' HsmConfigurationIdentifier = "string",
#' MaxRecords = 123,
#' Marker = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_hsm_configurations
redshift_describe_hsm_configurations <- function(HsmConfigurationIdentifier = NULL, MaxRecords = NULL, Marker = NULL, TagKeys = NULL, TagValues = NULL) {
  # Build and send the DescribeHsmConfigurations request.
  op_spec <- new_operation(
    name = "DescribeHsmConfigurations",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .redshift$describe_hsm_configurations_input(
    HsmConfigurationIdentifier = HsmConfigurationIdentifier,
    MaxRecords = MaxRecords,
    Marker = Marker,
    TagKeys = TagKeys,
    TagValues = TagValues
  )
  shape <- .redshift$describe_hsm_configurations_output()
  client <- .redshift$service(get_config())
  send_request(new_request(client, op_spec, payload, shape))
}
.redshift$operations$describe_hsm_configurations <- redshift_describe_hsm_configurations
#' Describes whether information, such as queries and connection attempts,
#' is being logged for the specified Amazon Redshift cluster
#'
#' @description
#' Describes whether information, such as queries and connection attempts,
#' is being logged for the specified Amazon Redshift cluster.
#'
#' @usage
#' redshift_describe_logging_status(ClusterIdentifier)
#'
#' @param ClusterIdentifier [required] The identifier of the cluster from which to get the logging status.
#'
#' Example: `examplecluster`
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' LoggingEnabled = TRUE|FALSE,
#' BucketName = "string",
#' S3KeyPrefix = "string",
#' LastSuccessfulDeliveryTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' LastFailureTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' LastFailureMessage = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_logging_status(
#' ClusterIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_logging_status
redshift_describe_logging_status <- function(ClusterIdentifier) {
  # Build and dispatch the DescribeLoggingStatus call for one cluster.
  operation <- new_operation(
    name = "DescribeLoggingStatus",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_input <- .redshift$describe_logging_status_input(
    ClusterIdentifier = ClusterIdentifier
  )
  output_shape <- .redshift$describe_logging_status_output()
  service <- .redshift$service(get_config())
  send_request(new_request(service, operation, request_input, output_shape))
}
.redshift$operations$describe_logging_status <- redshift_describe_logging_status
#' Returns properties of possible node configurations such as node type,
#' number of nodes, and disk usage for the specified action type
#'
#' @description
#' Returns properties of possible node configurations such as node type,
#' number of nodes, and disk usage for the specified action type.
#'
#' @usage
#' redshift_describe_node_configuration_options(ActionType,
#' ClusterIdentifier, SnapshotIdentifier, OwnerAccount, Filters, Marker,
#' MaxRecords)
#'
#' @param ActionType [required] The action type to evaluate for possible node configurations. Specify
#' "restore-cluster" to get configuration combinations based on an existing
#' snapshot. Specify "recommend-node-config" to get configuration
#' recommendations based on an existing cluster or snapshot. Specify
#' "resize-cluster" to get configuration combinations for elastic resize
#' based on an existing cluster.
#' @param ClusterIdentifier The identifier of the cluster to evaluate for possible node
#' configurations.
#' @param SnapshotIdentifier The identifier of the snapshot to evaluate for possible node
#' configurations.
#' @param OwnerAccount The AWS customer account used to create or copy the snapshot. Required
#' if you are restoring a snapshot you do not own, optional if you own the
#' snapshot.
#' @param Filters A set of name, operator, and value items to filter the results.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_node_configuration_options`][redshift_describe_node_configuration_options]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `500`
#'
#' Constraints: minimum 100, maximum 500.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' NodeConfigurationOptionList = list(
#' list(
#' NodeType = "string",
#' NumberOfNodes = 123,
#' EstimatedDiskUtilizationPercent = 123.0,
#' Mode = "standard"|"high-performance"
#' )
#' ),
#' Marker = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_node_configuration_options(
#' ActionType = "restore-cluster"|"recommend-node-config"|"resize-cluster",
#' ClusterIdentifier = "string",
#' SnapshotIdentifier = "string",
#' OwnerAccount = "string",
#' Filters = list(
#' list(
#' Name = "NodeType"|"NumberOfNodes"|"EstimatedDiskUtilizationPercent"|"Mode",
#' Operator = "eq"|"lt"|"gt"|"le"|"ge"|"in"|"between",
#' Values = list(
#' "string"
#' )
#' )
#' ),
#' Marker = "string",
#' MaxRecords = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_node_configuration_options
redshift_describe_node_configuration_options <- function(ActionType, ClusterIdentifier = NULL, SnapshotIdentifier = NULL, OwnerAccount = NULL, Filters = NULL, Marker = NULL, MaxRecords = NULL) {
  # Assemble and send the DescribeNodeConfigurationOptions request.
  op_spec <- new_operation(
    name = "DescribeNodeConfigurationOptions",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .redshift$describe_node_configuration_options_input(
    ActionType = ActionType,
    ClusterIdentifier = ClusterIdentifier,
    SnapshotIdentifier = SnapshotIdentifier,
    OwnerAccount = OwnerAccount,
    Filters = Filters,
    Marker = Marker,
    MaxRecords = MaxRecords
  )
  shape <- .redshift$describe_node_configuration_options_output()
  client <- .redshift$service(get_config())
  req <- new_request(client, op_spec, payload, shape)
  send_request(req)
}
.redshift$operations$describe_node_configuration_options <- redshift_describe_node_configuration_options
#' Returns a list of orderable cluster options
#'
#' @description
#' Returns a list of orderable cluster options. Before you create a new
#' cluster you can use this operation to find what options are available,
#' such as the EC2 Availability Zones (AZ) in the specific AWS Region that
#' you can specify, and the node types you can request. The node types
#' differ by available storage, memory, CPU and price. With the cost
#' involved you might want to obtain a list of cluster options in the
#' specific region and specify values when creating a cluster. For more
#' information about managing clusters, go to [Amazon Redshift
#' Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_describe_orderable_cluster_options(ClusterVersion, NodeType,
#' MaxRecords, Marker)
#'
#' @param ClusterVersion The version filter value. Specify this parameter to show only the
#' available offerings matching the specified version.
#'
#' Default: All versions.
#'
#' Constraints: Must be one of the versions returned from
#' [`describe_cluster_versions`][redshift_describe_cluster_versions].
#' @param NodeType The node type filter value. Specify this parameter to show only the
#' available offerings matching the specified node type.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_orderable_cluster_options`][redshift_describe_orderable_cluster_options]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' OrderableClusterOptions = list(
#' list(
#' ClusterVersion = "string",
#' ClusterType = "string",
#' NodeType = "string",
#' AvailabilityZones = list(
#' list(
#' Name = "string",
#' SupportedPlatforms = list(
#' list(
#' Name = "string"
#' )
#' )
#' )
#' )
#' )
#' ),
#' Marker = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_orderable_cluster_options(
#' ClusterVersion = "string",
#' NodeType = "string",
#' MaxRecords = 123,
#' Marker = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_orderable_cluster_options
redshift_describe_orderable_cluster_options <- function(ClusterVersion = NULL, NodeType = NULL, MaxRecords = NULL, Marker = NULL) {
  # Build and send the DescribeOrderableClusterOptions request.
  operation <- new_operation(
    name = "DescribeOrderableClusterOptions",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_input <- .redshift$describe_orderable_cluster_options_input(
    ClusterVersion = ClusterVersion,
    NodeType = NodeType,
    MaxRecords = MaxRecords,
    Marker = Marker
  )
  output_shape <- .redshift$describe_orderable_cluster_options_output()
  service <- .redshift$service(get_config())
  send_request(new_request(service, operation, request_input, output_shape))
}
.redshift$operations$describe_orderable_cluster_options <- redshift_describe_orderable_cluster_options
#' Returns a list of the available reserved node offerings by Amazon
#' Redshift with their descriptions including the node type, the fixed and
#' recurring costs of reserving the node and duration the node will be
#' reserved for you
#'
#' @description
#' Returns a list of the available reserved node offerings by Amazon
#' Redshift with their descriptions including the node type, the fixed and
#' recurring costs of reserving the node and duration the node will be
#' reserved for you. These descriptions help you determine which reserved
#' node offering you want to purchase. You then use the unique offering ID
#' in your call to
#' [`purchase_reserved_node_offering`][redshift_purchase_reserved_node_offering]
#' to reserve one or more nodes for your Amazon Redshift cluster.
#'
#' For more information about reserved node offerings, go to [Purchasing
#' Reserved
#' Nodes](https://docs.aws.amazon.com/redshift/latest/mgmt/purchase-reserved-node-instance.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_describe_reserved_node_offerings(ReservedNodeOfferingId,
#' MaxRecords, Marker)
#'
#' @param ReservedNodeOfferingId The unique identifier for the offering.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_reserved_node_offerings`][redshift_describe_reserved_node_offerings]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' ReservedNodeOfferings = list(
#' list(
#' ReservedNodeOfferingId = "string",
#' NodeType = "string",
#' Duration = 123,
#' FixedPrice = 123.0,
#' UsagePrice = 123.0,
#' CurrencyCode = "string",
#' OfferingType = "string",
#' RecurringCharges = list(
#' list(
#' RecurringChargeAmount = 123.0,
#' RecurringChargeFrequency = "string"
#' )
#' ),
#' ReservedNodeOfferingType = "Regular"|"Upgradable"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_reserved_node_offerings(
#' ReservedNodeOfferingId = "string",
#' MaxRecords = 123,
#' Marker = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_reserved_node_offerings
redshift_describe_reserved_node_offerings <- function(ReservedNodeOfferingId = NULL, MaxRecords = NULL, Marker = NULL) {
  # Build and send the DescribeReservedNodeOfferings request.
  op_spec <- new_operation(
    name = "DescribeReservedNodeOfferings",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .redshift$describe_reserved_node_offerings_input(
    ReservedNodeOfferingId = ReservedNodeOfferingId,
    MaxRecords = MaxRecords,
    Marker = Marker
  )
  shape <- .redshift$describe_reserved_node_offerings_output()
  client <- .redshift$service(get_config())
  req <- new_request(client, op_spec, payload, shape)
  send_request(req)
}
.redshift$operations$describe_reserved_node_offerings <- redshift_describe_reserved_node_offerings
#' Returns the descriptions of the reserved nodes
#'
#' @description
#' Returns the descriptions of the reserved nodes.
#'
#' @usage
#' redshift_describe_reserved_nodes(ReservedNodeId, MaxRecords, Marker)
#'
#' @param ReservedNodeId Identifier for the node reservation.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_reserved_nodes`][redshift_describe_reserved_nodes] request
#' exceed the value specified in `MaxRecords`, AWS returns a value in the
#' `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' ReservedNodes = list(
#' list(
#' ReservedNodeId = "string",
#' ReservedNodeOfferingId = "string",
#' NodeType = "string",
#' StartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Duration = 123,
#' FixedPrice = 123.0,
#' UsagePrice = 123.0,
#' CurrencyCode = "string",
#' NodeCount = 123,
#' State = "string",
#' OfferingType = "string",
#' RecurringCharges = list(
#' list(
#' RecurringChargeAmount = 123.0,
#' RecurringChargeFrequency = "string"
#' )
#' ),
#' ReservedNodeOfferingType = "Regular"|"Upgradable"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_reserved_nodes(
#' ReservedNodeId = "string",
#' MaxRecords = 123,
#' Marker = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_reserved_nodes
redshift_describe_reserved_nodes <- function(ReservedNodeId = NULL, MaxRecords = NULL, Marker = NULL) {
  # Build and send the DescribeReservedNodes request.
  operation <- new_operation(
    name = "DescribeReservedNodes",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_input <- .redshift$describe_reserved_nodes_input(
    ReservedNodeId = ReservedNodeId,
    MaxRecords = MaxRecords,
    Marker = Marker
  )
  output_shape <- .redshift$describe_reserved_nodes_output()
  service <- .redshift$service(get_config())
  send_request(new_request(service, operation, request_input, output_shape))
}
.redshift$operations$describe_reserved_nodes <- redshift_describe_reserved_nodes
#' Returns information about the last resize operation for the specified
#' cluster
#'
#' @description
#' Returns information about the last resize operation for the specified
#' cluster. If no resize operation has ever been initiated for the
#' specified cluster, an `HTTP 404` error is returned. If a resize operation
#' was initiated and completed, the status of the resize remains as
#' `SUCCEEDED` until the next resize.
#'
#' A resize operation can be requested using
#' [`modify_cluster`][redshift_modify_cluster] and specifying a different
#' number or type of nodes for the cluster.
#'
#' @usage
#' redshift_describe_resize(ClusterIdentifier)
#'
#' @param ClusterIdentifier [required] The unique identifier of a cluster whose resize progress you are
#' requesting. This parameter is case-sensitive.
#'
#' By default, resize operations for all clusters defined for an AWS
#' account are returned.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' TargetNodeType = "string",
#' TargetNumberOfNodes = 123,
#' TargetClusterType = "string",
#' Status = "string",
#' ImportTablesCompleted = list(
#' "string"
#' ),
#' ImportTablesInProgress = list(
#' "string"
#' ),
#' ImportTablesNotStarted = list(
#' "string"
#' ),
#' AvgResizeRateInMegaBytesPerSecond = 123.0,
#' TotalResizeDataInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ResizeType = "string",
#' Message = "string",
#' TargetEncryptionType = "string",
#' DataTransferProgressPercent = 123.0
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_resize(
#' ClusterIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_resize
redshift_describe_resize <- function(ClusterIdentifier) {
  # Build and dispatch the DescribeResize call for one cluster.
  op_spec <- new_operation(
    name = "DescribeResize",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .redshift$describe_resize_input(
    ClusterIdentifier = ClusterIdentifier
  )
  shape <- .redshift$describe_resize_output()
  client <- .redshift$service(get_config())
  req <- new_request(client, op_spec, payload, shape)
  send_request(req)
}
.redshift$operations$describe_resize <- redshift_describe_resize
#' Describes properties of scheduled actions
#'
#' @description
#' Describes properties of scheduled actions.
#'
#' @usage
#' redshift_describe_scheduled_actions(ScheduledActionName,
#' TargetActionType, StartTime, EndTime, Active, Filters, Marker,
#' MaxRecords)
#'
#' @param ScheduledActionName The name of the scheduled action to retrieve.
#' @param TargetActionType The type of the scheduled actions to retrieve.
#' @param StartTime The start time in UTC of the scheduled actions to retrieve. Only active
#' scheduled actions that have invocations after this time are retrieved.
#' @param EndTime The end time in UTC of the scheduled action to retrieve. Only active
#' scheduled actions that have invocations before this time are retrieved.
#' @param Active If true, retrieve only active scheduled actions. If false, retrieve only
#' disabled scheduled actions.
#' @param Filters List of scheduled action filters.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_scheduled_actions`][redshift_describe_scheduled_actions]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' ScheduledActions = list(
#' list(
#' ScheduledActionName = "string",
#' TargetAction = list(
#' ResizeCluster = list(
#' ClusterIdentifier = "string",
#' ClusterType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' Classic = TRUE|FALSE
#' ),
#' PauseCluster = list(
#' ClusterIdentifier = "string"
#' ),
#' ResumeCluster = list(
#' ClusterIdentifier = "string"
#' )
#' ),
#' Schedule = "string",
#' IamRole = "string",
#' ScheduledActionDescription = "string",
#' State = "ACTIVE"|"DISABLED",
#' NextInvocations = list(
#' as.POSIXct(
#' "2015-01-01"
#' )
#' ),
#' StartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' EndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_scheduled_actions(
#' ScheduledActionName = "string",
#' TargetActionType = "ResizeCluster"|"PauseCluster"|"ResumeCluster",
#' StartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' EndTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Active = TRUE|FALSE,
#' Filters = list(
#' list(
#' Name = "cluster-identifier"|"iam-role",
#' Values = list(
#' "string"
#' )
#' )
#' ),
#' Marker = "string",
#' MaxRecords = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_scheduled_actions
redshift_describe_scheduled_actions <- function(ScheduledActionName = NULL, TargetActionType = NULL, StartTime = NULL, EndTime = NULL, Active = NULL, Filters = NULL, Marker = NULL, MaxRecords = NULL) {
  # Assemble and send the DescribeScheduledActions request.
  operation <- new_operation(
    name = "DescribeScheduledActions",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_input <- .redshift$describe_scheduled_actions_input(
    ScheduledActionName = ScheduledActionName,
    TargetActionType = TargetActionType,
    StartTime = StartTime,
    EndTime = EndTime,
    Active = Active,
    Filters = Filters,
    Marker = Marker,
    MaxRecords = MaxRecords
  )
  output_shape <- .redshift$describe_scheduled_actions_output()
  service <- .redshift$service(get_config())
  send_request(new_request(service, operation, request_input, output_shape))
}
.redshift$operations$describe_scheduled_actions <- redshift_describe_scheduled_actions
#' Returns a list of snapshot copy grants owned by the AWS account in the
#' destination region
#'
#' @description
#' Returns a list of snapshot copy grants owned by the AWS account in the
#' destination region.
#'
#' For more information about managing snapshot copy grants, go to [Amazon
#' Redshift Database
#' Encryption](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_describe_snapshot_copy_grants(SnapshotCopyGrantName,
#' MaxRecords, Marker, TagKeys, TagValues)
#'
#' @param SnapshotCopyGrantName The name of the snapshot copy grant.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a `DescribeSnapshotCopyGrant`
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#'
#' Constraints: You can specify either the **SnapshotCopyGrantName**
#' parameter or the **Marker** parameter, but not both.
#' @param TagKeys A tag key or keys for which you want to return all matching resources
#' that are associated with the specified key or keys. For example, suppose
#' that you have resources tagged with keys called `owner` and
#' `environment`. If you specify both of these tag keys in the request,
#' Amazon Redshift returns a response with all resources that have either
#' or both of these tag keys associated with them.
#' @param TagValues A tag value or values for which you want to return all matching
#' resources that are associated with the specified value or values. For
#' example, suppose that you have resources tagged with values called
#' `admin` and `test`. If you specify both of these tag values in the
#' request, Amazon Redshift returns a response with all resources that have
#' either or both of these tag values associated with them.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' SnapshotCopyGrants = list(
#' list(
#' SnapshotCopyGrantName = "string",
#' KmsKeyId = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_snapshot_copy_grants(
#' SnapshotCopyGrantName = "string",
#' MaxRecords = 123,
#' Marker = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_snapshot_copy_grants
redshift_describe_snapshot_copy_grants <- function(SnapshotCopyGrantName = NULL, MaxRecords = NULL, Marker = NULL, TagKeys = NULL, TagValues = NULL) {
  # Operation metadata: DescribeSnapshotCopyGrants, POST to "/".
  operation <- new_operation(
    name = "DescribeSnapshotCopyGrants",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the call arguments and declare the expected response shape.
  serialized_input <- .redshift$describe_snapshot_copy_grants_input(SnapshotCopyGrantName = SnapshotCopyGrantName, MaxRecords = MaxRecords, Marker = Marker, TagKeys = TagKeys, TagValues = TagValues)
  output_shape <- .redshift$describe_snapshot_copy_grants_output()
  config <- get_config()
  client <- .redshift$service(config)
  # Build, send, and return the parsed response.
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.redshift$operations$describe_snapshot_copy_grants <- redshift_describe_snapshot_copy_grants
#' Returns a list of snapshot schedules
#'
#' @description
#' Returns a list of snapshot schedules.
#'
#' @usage
#' redshift_describe_snapshot_schedules(ClusterIdentifier,
#' ScheduleIdentifier, TagKeys, TagValues, Marker, MaxRecords)
#'
#' @param ClusterIdentifier The unique identifier for the cluster whose snapshot schedules you want
#' to view.
#' @param ScheduleIdentifier A unique identifier for a snapshot schedule.
#' @param TagKeys The key value for a snapshot schedule tag.
#' @param TagValues The value corresponding to the key of the snapshot schedule tag.
#' @param Marker A value that indicates the starting point for the next set of response
#' records in a subsequent request. If a value is returned in a response,
#' you can retrieve the next set of records by providing this returned
#' marker value in the `marker` parameter and retrying the command. If the
#' `marker` field is empty, all response records have been retrieved for
#' the request.
#' @param MaxRecords The maximum number or response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned `marker` value.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' SnapshotSchedules = list(
#' list(
#' ScheduleDefinitions = list(
#' "string"
#' ),
#' ScheduleIdentifier = "string",
#' ScheduleDescription = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' NextInvocations = list(
#' as.POSIXct(
#' "2015-01-01"
#' )
#' ),
#' AssociatedClusterCount = 123,
#' AssociatedClusters = list(
#' list(
#' ClusterIdentifier = "string",
#' ScheduleAssociationState = "MODIFYING"|"ACTIVE"|"FAILED"
#' )
#' )
#' )
#' ),
#' Marker = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_snapshot_schedules(
#' ClusterIdentifier = "string",
#' ScheduleIdentifier = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' ),
#' Marker = "string",
#' MaxRecords = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_snapshot_schedules
redshift_describe_snapshot_schedules <- function(ClusterIdentifier = NULL, ScheduleIdentifier = NULL, TagKeys = NULL, TagValues = NULL, Marker = NULL, MaxRecords = NULL) {
  # DescribeSnapshotSchedules: POST to the service root, no paginator.
  operation <- new_operation(
    name = "DescribeSnapshotSchedules",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  serialized_input <- .redshift$describe_snapshot_schedules_input(ClusterIdentifier = ClusterIdentifier, ScheduleIdentifier = ScheduleIdentifier, TagKeys = TagKeys, TagValues = TagValues, Marker = Marker, MaxRecords = MaxRecords)
  output_shape <- .redshift$describe_snapshot_schedules_output()
  # Pick up the caller's configuration and build a service client from it.
  config <- get_config()
  client <- .redshift$service(config)
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.redshift$operations$describe_snapshot_schedules <- redshift_describe_snapshot_schedules
#' Returns account level backups storage size and provisional storage
#'
#' @description
#' Returns account level backups storage size and provisional storage.
#'
#' @usage
#' redshift_describe_storage()
#'
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' TotalBackupSizeInMegaBytes = 123.0,
#' TotalProvisionedStorageInMegaBytes = 123.0
#' )
#' ```
#'
#'
#' @keywords internal
#'
#' @rdname redshift_describe_storage
redshift_describe_storage <- function() {
  # DescribeStorage takes no parameters; the input shape is empty.
  operation <- new_operation(
    name = "DescribeStorage",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  serialized_input <- .redshift$describe_storage_input()
  output_shape <- .redshift$describe_storage_output()
  # Resolve configuration, then build and dispatch the request.
  config <- get_config()
  client <- .redshift$service(config)
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.redshift$operations$describe_storage <- redshift_describe_storage
#' Lists the status of one or more table restore requests made using the
#' RestoreTableFromClusterSnapshot API action
#'
#' @description
#' Lists the status of one or more table restore requests made using the
#' [`restore_table_from_cluster_snapshot`][redshift_restore_table_from_cluster_snapshot]
#' API action. If you don't specify a value for the `TableRestoreRequestId`
#' parameter, then
#' [`describe_table_restore_status`][redshift_describe_table_restore_status]
#' returns the status of all table restore requests ordered by the date and
#' time of the request in ascending order. Otherwise
#' [`describe_table_restore_status`][redshift_describe_table_restore_status]
#' returns the status of the table specified by `TableRestoreRequestId`.
#'
#' @usage
#' redshift_describe_table_restore_status(ClusterIdentifier,
#' TableRestoreRequestId, MaxRecords, Marker)
#'
#' @param ClusterIdentifier The Amazon Redshift cluster that the table is being restored to.
#' @param TableRestoreRequestId The identifier of the table restore request to return status for. If you
#' don't specify a `TableRestoreRequestId` value, then
#' [`describe_table_restore_status`][redshift_describe_table_restore_status]
#' returns the status of all in-progress table restore requests.
#' @param MaxRecords The maximum number of records to include in the response. If more
#' records exist than the specified `MaxRecords` value, a pagination token
#' called a marker is included in the response so that the remaining
#' results can be retrieved.
#' @param Marker An optional pagination token provided by a previous
#' [`describe_table_restore_status`][redshift_describe_table_restore_status]
#' request. If this parameter is specified, the response includes only
#' records beyond the marker, up to the value specified by the `MaxRecords`
#' parameter.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' TableRestoreStatusDetails = list(
#' list(
#' TableRestoreRequestId = "string",
#' Status = "PENDING"|"IN_PROGRESS"|"SUCCEEDED"|"FAILED"|"CANCELED",
#' Message = "string",
#' RequestTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ProgressInMegaBytes = 123,
#' TotalDataInMegaBytes = 123,
#' ClusterIdentifier = "string",
#' SnapshotIdentifier = "string",
#' SourceDatabaseName = "string",
#' SourceSchemaName = "string",
#' SourceTableName = "string",
#' TargetDatabaseName = "string",
#' TargetSchemaName = "string",
#' NewTableName = "string"
#' )
#' ),
#' Marker = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_table_restore_status(
#' ClusterIdentifier = "string",
#' TableRestoreRequestId = "string",
#' MaxRecords = 123,
#' Marker = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_table_restore_status
redshift_describe_table_restore_status <- function(ClusterIdentifier = NULL, TableRestoreRequestId = NULL, MaxRecords = NULL, Marker = NULL) {
  # DescribeTableRestoreStatus: POST to "/", no paginator metadata.
  operation <- new_operation(
    name = "DescribeTableRestoreStatus",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal arguments into the request shape; declare the response shape.
  serialized_input <- .redshift$describe_table_restore_status_input(ClusterIdentifier = ClusterIdentifier, TableRestoreRequestId = TableRestoreRequestId, MaxRecords = MaxRecords, Marker = Marker)
  output_shape <- .redshift$describe_table_restore_status_output()
  config <- get_config()
  client <- .redshift$service(config)
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.redshift$operations$describe_table_restore_status <- redshift_describe_table_restore_status
#' Returns a list of tags
#'
#' @description
#' Returns a list of tags. You can return tags from a specific resource by
#' specifying an ARN, or you can return all tags for a given type of
#' resource, such as clusters, snapshots, and so on.
#'
#' The following are limitations for
#' [`describe_tags`][redshift_describe_tags]:
#'
#' - You cannot specify an ARN and a resource-type value together in the
#' same request.
#'
#' - You cannot use the `MaxRecords` and `Marker` parameters together
#' with the ARN parameter.
#'
#' - The `MaxRecords` parameter can be a range from 10 to 50 results to
#' return in a request.
#'
#' If you specify both tag keys and tag values in the same request, Amazon
#' Redshift returns all resources that match any combination of the
#' specified keys and values. For example, if you have `owner` and
#' `environment` for tag keys, and `admin` and `test` for tag values, all
#' resources that have any combination of those values are returned.
#'
#' If both tag keys and values are omitted from the request, resources are
#' returned regardless of whether they have tag keys or values associated
#' with them.
#'
#' @usage
#' redshift_describe_tags(ResourceName, ResourceType, MaxRecords, Marker,
#' TagKeys, TagValues)
#'
#' @param ResourceName The Amazon Resource Name (ARN) for which you want to describe the tag or
#' tags. For example, `arn:aws:redshift:us-east-2:123456789:cluster:t1`.
#' @param ResourceType The type of resource with which you want to view tags. Valid resource
#' types are:
#'
#' - Cluster
#'
#' - CIDR/IP
#'
#' - EC2 security group
#'
#' - Snapshot
#'
#' - Cluster security group
#'
#' - Subnet group
#'
#' - HSM connection
#'
#' - HSM certificate
#'
#' - Parameter group
#'
#' - Snapshot copy grant
#'
#' For more information about Amazon Redshift resource types and
#' constructing ARNs, go to [Specifying Policy Elements: Actions, Effects,
#' Resources, and
#' Principals](https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-overview.html#redshift-iam-access-control-specify-actions)
#' in the Amazon Redshift Cluster Management Guide.
#' @param MaxRecords The maximum number or response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned `marker` value.
#' @param Marker A value that indicates the starting point for the next set of response
#' records in a subsequent request. If a value is returned in a response,
#' you can retrieve the next set of records by providing this returned
#' marker value in the `marker` parameter and retrying the command. If the
#' `marker` field is empty, all response records have been retrieved for
#' the request.
#' @param TagKeys A tag key or keys for which you want to return all matching resources
#' that are associated with the specified key or keys. For example, suppose
#' that you have resources tagged with keys called `owner` and
#' `environment`. If you specify both of these tag keys in the request,
#' Amazon Redshift returns a response with all resources that have either
#' or both of these tag keys associated with them.
#' @param TagValues A tag value or values for which you want to return all matching
#' resources that are associated with the specified value or values. For
#' example, suppose that you have resources tagged with values called
#' `admin` and `test`. If you specify both of these tag values in the
#' request, Amazon Redshift returns a response with all resources that have
#' either or both of these tag values associated with them.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' TaggedResources = list(
#' list(
#' Tag = list(
#' Key = "string",
#' Value = "string"
#' ),
#' ResourceName = "string",
#' ResourceType = "string"
#' )
#' ),
#' Marker = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_tags(
#' ResourceName = "string",
#' ResourceType = "string",
#' MaxRecords = 123,
#' Marker = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_tags
redshift_describe_tags <- function(ResourceName = NULL, ResourceType = NULL, MaxRecords = NULL, Marker = NULL, TagKeys = NULL, TagValues = NULL) {
  # DescribeTags is issued as a POST against the service root.
  operation <- new_operation(
    name = "DescribeTags",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  serialized_input <- .redshift$describe_tags_input(ResourceName = ResourceName, ResourceType = ResourceType, MaxRecords = MaxRecords, Marker = Marker, TagKeys = TagKeys, TagValues = TagValues)
  output_shape <- .redshift$describe_tags_output()
  # Build a client from the active configuration and send the request.
  config <- get_config()
  client <- .redshift$service(config)
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.redshift$operations$describe_tags <- redshift_describe_tags
#' Shows usage limits on a cluster
#'
#' @description
#' Shows usage limits on a cluster. Results are filtered based on the
#' combination of input usage limit identifier, cluster identifier, and
#' feature type parameters:
#'
#' - If usage limit identifier, cluster identifier, and feature type are
#' not provided, then all usage limit objects for the current account
#' in the current region are returned.
#'
#' - If usage limit identifier is provided, then the corresponding usage
#' limit object is returned.
#'
#' - If cluster identifier is provided, then all usage limit objects for
#' the specified cluster are returned.
#'
#' - If cluster identifier and feature type are provided, then all usage
#' limit objects for the combination of cluster and feature are
#' returned.
#'
#' @usage
#' redshift_describe_usage_limits(UsageLimitId, ClusterIdentifier,
#' FeatureType, MaxRecords, Marker, TagKeys, TagValues)
#'
#' @param UsageLimitId The identifier of the usage limit to describe.
#' @param ClusterIdentifier The identifier of the cluster for which you want to describe usage
#' limits.
#' @param FeatureType The feature type for which you want to describe usage limits.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_usage_limits`][redshift_describe_usage_limits] request exceed
#' the value specified in `MaxRecords`, AWS returns a value in the `Marker`
#' field of the response. You can retrieve the next set of response records
#' by providing the returned marker value in the `Marker` parameter and
#' retrying the request.
#' @param TagKeys A tag key or keys for which you want to return all matching usage limit
#' objects that are associated with the specified key or keys. For example,
#' suppose that you have parameter groups that are tagged with keys called
#' `owner` and `environment`. If you specify both of these tag keys in the
#' request, Amazon Redshift returns a response with the usage limit objects
#' have either or both of these tag keys associated with them.
#' @param TagValues A tag value or values for which you want to return all matching usage
#' limit objects that are associated with the specified tag value or
#' values. For example, suppose that you have parameter groups that are
#' tagged with values called `admin` and `test`. If you specify both of
#' these tag values in the request, Amazon Redshift returns a response with
#' the usage limit objects that have either or both of these tag values
#' associated with them.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' UsageLimits = list(
#' list(
#' UsageLimitId = "string",
#' ClusterIdentifier = "string",
#' FeatureType = "spectrum"|"concurrency-scaling",
#' LimitType = "time"|"data-scanned",
#' Amount = 123,
#' Period = "daily"|"weekly"|"monthly",
#' BreachAction = "log"|"emit-metric"|"disable",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ),
#' Marker = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_usage_limits(
#' UsageLimitId = "string",
#' ClusterIdentifier = "string",
#' FeatureType = "spectrum"|"concurrency-scaling",
#' MaxRecords = 123,
#' Marker = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_usage_limits
redshift_describe_usage_limits <- function(UsageLimitId = NULL, ClusterIdentifier = NULL, FeatureType = NULL, MaxRecords = NULL, Marker = NULL, TagKeys = NULL, TagValues = NULL) {
  # DescribeUsageLimits: POST to "/", no paginator attached.
  operation <- new_operation(
    name = "DescribeUsageLimits",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize call arguments; declare the response shape to deserialize into.
  serialized_input <- .redshift$describe_usage_limits_input(UsageLimitId = UsageLimitId, ClusterIdentifier = ClusterIdentifier, FeatureType = FeatureType, MaxRecords = MaxRecords, Marker = Marker, TagKeys = TagKeys, TagValues = TagValues)
  output_shape <- .redshift$describe_usage_limits_output()
  config <- get_config()
  client <- .redshift$service(config)
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.redshift$operations$describe_usage_limits <- redshift_describe_usage_limits
#' Stops logging information, such as queries and connection attempts, for
#' the specified Amazon Redshift cluster
#'
#' @description
#' Stops logging information, such as queries and connection attempts, for
#' the specified Amazon Redshift cluster.
#'
#' @usage
#' redshift_disable_logging(ClusterIdentifier)
#'
#' @param ClusterIdentifier [required] The identifier of the cluster on which logging is to be stopped.
#'
#' Example: `examplecluster`
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' LoggingEnabled = TRUE|FALSE,
#' BucketName = "string",
#' S3KeyPrefix = "string",
#' LastSuccessfulDeliveryTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' LastFailureTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' LastFailureMessage = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$disable_logging(
#' ClusterIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_disable_logging
redshift_disable_logging <- function(ClusterIdentifier) {
  # DisableLogging takes a single required cluster identifier.
  operation <- new_operation(
    name = "DisableLogging",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  serialized_input <- .redshift$disable_logging_input(ClusterIdentifier = ClusterIdentifier)
  output_shape <- .redshift$disable_logging_output()
  # Resolve configuration, build the client, then dispatch.
  config <- get_config()
  client <- .redshift$service(config)
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.redshift$operations$disable_logging <- redshift_disable_logging
#' Disables the automatic copying of snapshots from one region to another
#' region for a specified cluster
#'
#' @description
#' Disables the automatic copying of snapshots from one region to another
#' region for a specified cluster.
#'
#' If your cluster and its snapshots are encrypted using a customer master
#' key (CMK) from AWS KMS, use
#' [`delete_snapshot_copy_grant`][redshift_delete_snapshot_copy_grant] to
#' delete the grant that grants Amazon Redshift permission to the CMK in
#' the destination region.
#'
#' @usage
#' redshift_disable_snapshot_copy(ClusterIdentifier)
#'
#' @param ClusterIdentifier [required] The unique identifier of the source cluster that you want to disable
#' copying of snapshots to a destination region.
#'
#' Constraints: Must be the valid name of an existing cluster that has
#' cross-region snapshot copy enabled.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$disable_snapshot_copy(
#' ClusterIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_disable_snapshot_copy
redshift_disable_snapshot_copy <- function(ClusterIdentifier) {
  # DisableSnapshotCopy: POST to the service root with the cluster id.
  operation <- new_operation(
    name = "DisableSnapshotCopy",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  serialized_input <- .redshift$disable_snapshot_copy_input(ClusterIdentifier = ClusterIdentifier)
  output_shape <- .redshift$disable_snapshot_copy_output()
  # Build a service client from the active configuration.
  config <- get_config()
  client <- .redshift$service(config)
  # Send the request and return the deserialized cluster description.
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.redshift$operations$disable_snapshot_copy <- redshift_disable_snapshot_copy
#' Starts logging information, such as queries and connection attempts, for
#' the specified Amazon Redshift cluster
#'
#' @description
#' Starts logging information, such as queries and connection attempts, for
#' the specified Amazon Redshift cluster.
#'
#' @usage
#' redshift_enable_logging(ClusterIdentifier, BucketName, S3KeyPrefix)
#'
#' @param ClusterIdentifier [required] The identifier of the cluster on which logging is to be started.
#'
#' Example: `examplecluster`
#' @param BucketName [required] The name of an existing S3 bucket where the log files are to be stored.
#'
#' Constraints:
#'
#' - Must be in the same region as the cluster
#'
#' - The cluster must have read bucket and put object permissions
#' @param S3KeyPrefix The prefix applied to the log file names.
#'
#' Constraints:
#'
#' - Cannot exceed 512 characters
#'
#' - Cannot contain spaces( ), double quotes ("), single quotes ('), a
#' backslash (\\), or control characters. The hexadecimal codes for
#' invalid characters are:
#'
#' - x00 to x20
#'
#' - x22
#'
#' - x27
#'
#' - x5c
#'
#' - x7f or larger
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' LoggingEnabled = TRUE|FALSE,
#' BucketName = "string",
#' S3KeyPrefix = "string",
#' LastSuccessfulDeliveryTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' LastFailureTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' LastFailureMessage = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$enable_logging(
#' ClusterIdentifier = "string",
#' BucketName = "string",
#' S3KeyPrefix = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_enable_logging
redshift_enable_logging <- function(ClusterIdentifier, BucketName, S3KeyPrefix = NULL) {
  # EnableLogging: cluster id and bucket are required; key prefix optional.
  operation <- new_operation(
    name = "EnableLogging",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  serialized_input <- .redshift$enable_logging_input(ClusterIdentifier = ClusterIdentifier, BucketName = BucketName, S3KeyPrefix = S3KeyPrefix)
  output_shape <- .redshift$enable_logging_output()
  # Resolve the caller's configuration and construct the client.
  config <- get_config()
  client <- .redshift$service(config)
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.redshift$operations$enable_logging <- redshift_enable_logging
#' Enables the automatic copy of snapshots from one region to another
#' region for a specified cluster
#'
#' @description
#' Enables the automatic copy of snapshots from one region to another
#' region for a specified cluster.
#'
#' @usage
#' redshift_enable_snapshot_copy(ClusterIdentifier, DestinationRegion,
#' RetentionPeriod, SnapshotCopyGrantName, ManualSnapshotRetentionPeriod)
#'
#' @param ClusterIdentifier [required] The unique identifier of the source cluster to copy snapshots from.
#'
#' Constraints: Must be the valid name of an existing cluster that does not
#' already have cross-region snapshot copy enabled.
#' @param DestinationRegion [required] The destination AWS Region that you want to copy snapshots to.
#'
#' Constraints: Must be the name of a valid AWS Region. For more
#' information, see [Regions and
#' Endpoints](https://docs.aws.amazon.com/general/latest/gr/rande.html#redshift_region)
#' in the Amazon Web Services General Reference.
#' @param RetentionPeriod The number of days to retain automated snapshots in the destination
#' region after they are copied from the source region.
#'
#' Default: 7.
#'
#' Constraints: Must be at least 1 and no more than 35.
#' @param SnapshotCopyGrantName The name of the snapshot copy grant to use when snapshots of an AWS
#' KMS-encrypted cluster are copied to the destination region.
#' @param ManualSnapshotRetentionPeriod The number of days to retain newly copied snapshots in the destination
#' AWS Region after they are copied from the source AWS Region. If the
#' value is -1, the manual snapshot is retained indefinitely.
#'
#' The value must be either -1 or an integer between 1 and 3,653.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$enable_snapshot_copy(
#' ClusterIdentifier = "string",
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' SnapshotCopyGrantName = "string",
#' ManualSnapshotRetentionPeriod = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_enable_snapshot_copy
redshift_enable_snapshot_copy <- function(ClusterIdentifier, DestinationRegion, RetentionPeriod = NULL, SnapshotCopyGrantName = NULL, ManualSnapshotRetentionPeriod = NULL) {
  # Describe the EnableSnapshotCopy API call (Redshift is a query-style
  # service: every operation POSTs to "/").
  operation <- new_operation(
    name = "EnableSnapshotCopy",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments and the expected response shape.
  request_body <- .redshift$enable_snapshot_copy_input(ClusterIdentifier = ClusterIdentifier, DestinationRegion = DestinationRegion, RetentionPeriod = RetentionPeriod, SnapshotCopyGrantName = SnapshotCopyGrantName, ManualSnapshotRetentionPeriod = ManualSnapshotRetentionPeriod)
  response_shape <- .redshift$enable_snapshot_copy_output()
  # Build a service client from the ambient configuration and dispatch.
  service <- .redshift$service(get_config())
  request <- new_request(service, operation, request_body, response_shape)
  send_request(request)
}
.redshift$operations$enable_snapshot_copy <- redshift_enable_snapshot_copy
#' Returns a database user name and temporary password with temporary
#' authorization to log on to an Amazon Redshift database
#'
#' @description
#' Returns a database user name and temporary password with temporary
#' authorization to log on to an Amazon Redshift database. The action
#' returns the database user name prefixed with `IAM:` if `AutoCreate` is
#' `False` or `IAMA:` if `AutoCreate` is `True`. You can optionally specify
#' one or more database user groups that the user will join at log on. By
#' default, the temporary credentials expire in 900 seconds. You can
#' optionally specify a duration between 900 seconds (15 minutes) and 3600
#' seconds (60 minutes). For more information, see [Using IAM
#' Authentication to Generate Database User
#' Credentials](https://docs.aws.amazon.com/redshift/latest/mgmt/generating-user-credentials.html)
#' in the Amazon Redshift Cluster Management Guide.
#'
#' The AWS Identity and Access Management (IAM) user or role that executes
#' GetClusterCredentials must have an IAM policy attached that allows
#' access to all necessary actions and resources. For more information
#' about permissions, see [Resource Policies for
#' GetClusterCredentials](https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html#redshift-policy-resources.getclustercredentials-resources)
#' in the Amazon Redshift Cluster Management Guide.
#'
#' If the `DbGroups` parameter is specified, the IAM policy must allow the
#' `redshift:JoinGroup` action with access to the listed `dbgroups`.
#'
#' In addition, if the `AutoCreate` parameter is set to `True`, then the
#' policy must include the `redshift:CreateClusterUser` privilege.
#'
#' If the `DbName` parameter is specified, the IAM policy must allow access
#' to the resource `dbname` for the specified database name.
#'
#' @usage
#' redshift_get_cluster_credentials(DbUser, DbName, ClusterIdentifier,
#' DurationSeconds, AutoCreate, DbGroups)
#'
#' @param DbUser [required] The name of a database user. If a user name matching `DbUser` exists in
#' the database, the temporary user credentials have the same permissions
#' as the existing user. If `DbUser` doesn't exist in the database and
#' `Autocreate` is `True`, a new user is created using the value for
#' `DbUser` with PUBLIC permissions. If a database user matching the value
#' for `DbUser` doesn't exist and `Autocreate` is `False`, then the command
#' succeeds but the connection attempt will fail because the user doesn't
#' exist in the database.
#'
#' For more information, see [CREATE
#' USER](https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html)
#' in the Amazon Redshift Database Developer Guide.
#'
#' Constraints:
#'
#' - Must be 1 to 64 alphanumeric characters or hyphens. The user name
#' can't be `PUBLIC`.
#'
#' - Must contain only lowercase letters, numbers, underscore, plus sign,
#' period (dot), at symbol (@@), or hyphen.
#'
#' - First character must be a letter.
#'
#' - Must not contain a colon ( : ) or slash ( / ).
#'
#' - Cannot be a reserved word. A list of reserved words can be found in
#' [Reserved
#' Words](https://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html)
#' in the Amazon Redshift Database Developer Guide.
#' @param DbName The name of a database that `DbUser` is authorized to log on to. If
#' `DbName` is not specified, `DbUser` can log on to any existing database.
#'
#' Constraints:
#'
#' - Must be 1 to 64 alphanumeric characters or hyphens
#'
#' - Must contain only lowercase letters, numbers, underscore, plus sign,
#' period (dot), at symbol (@@), or hyphen.
#'
#' - First character must be a letter.
#'
#' - Must not contain a colon ( : ) or slash ( / ).
#'
#' - Cannot be a reserved word. A list of reserved words can be found in
#' [Reserved
#' Words](https://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html)
#' in the Amazon Redshift Database Developer Guide.
#' @param ClusterIdentifier [required] The unique identifier of the cluster that contains the database for
#' which you are requesting credentials. This parameter is case sensitive.
#' @param DurationSeconds The number of seconds until the returned temporary password expires.
#'
#' Constraint: minimum 900, maximum 3600.
#'
#' Default: 900
#' @param AutoCreate Create a database user with the name specified for the user named in
#' `DbUser` if one does not exist.
#' @param DbGroups A list of the names of existing database groups that the user named in
#' `DbUser` will join for the current session, in addition to any group
#' memberships for an existing user. If not specified, a new user is added
#' only to PUBLIC.
#'
#' Database group name constraints
#'
#' - Must be 1 to 64 alphanumeric characters or hyphens
#'
#' - Must contain only lowercase letters, numbers, underscore, plus sign,
#' period (dot), at symbol (@@), or hyphen.
#'
#' - First character must be a letter.
#'
#' - Must not contain a colon ( : ) or slash ( / ).
#'
#' - Cannot be a reserved word. A list of reserved words can be found in
#' [Reserved
#' Words](https://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html)
#' in the Amazon Redshift Database Developer Guide.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' DbUser = "string",
#' DbPassword = "string",
#' Expiration = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$get_cluster_credentials(
#' DbUser = "string",
#' DbName = "string",
#' ClusterIdentifier = "string",
#' DurationSeconds = 123,
#' AutoCreate = TRUE|FALSE,
#' DbGroups = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_get_cluster_credentials
redshift_get_cluster_credentials <- function(DbUser, DbName = NULL, ClusterIdentifier, DurationSeconds = NULL, AutoCreate = NULL, DbGroups = NULL) {
  # Describe the GetClusterCredentials API call (query-style POST to "/").
  operation <- new_operation(
    name = "GetClusterCredentials",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize arguments into the request shape; capture the output shape.
  request_body <- .redshift$get_cluster_credentials_input(DbUser = DbUser, DbName = DbName, ClusterIdentifier = ClusterIdentifier, DurationSeconds = DurationSeconds, AutoCreate = AutoCreate, DbGroups = DbGroups)
  response_shape <- .redshift$get_cluster_credentials_output()
  # Construct the client from the current configuration and send.
  service <- .redshift$service(get_config())
  request <- new_request(service, operation, request_body, response_shape)
  send_request(request)
}
.redshift$operations$get_cluster_credentials <- redshift_get_cluster_credentials
#' Returns an array of DC2 ReservedNodeOfferings that matches the payment
#' type, term, and usage price of the given DC1 reserved node
#'
#' @description
#' Returns an array of DC2 ReservedNodeOfferings that matches the payment
#' type, term, and usage price of the given DC1 reserved node.
#'
#' @usage
#' redshift_get_reserved_node_exchange_offerings(ReservedNodeId,
#' MaxRecords, Marker)
#'
#' @param ReservedNodeId [required] A string representing the node identifier for the DC1 Reserved Node to
#' be exchanged.
#' @param MaxRecords An integer setting the maximum number of ReservedNodeOfferings to
#' retrieve.
#' @param Marker A value that indicates the starting point for the next set of
#' ReservedNodeOfferings.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' ReservedNodeOfferings = list(
#' list(
#' ReservedNodeOfferingId = "string",
#' NodeType = "string",
#' Duration = 123,
#' FixedPrice = 123.0,
#' UsagePrice = 123.0,
#' CurrencyCode = "string",
#' OfferingType = "string",
#' RecurringCharges = list(
#' list(
#' RecurringChargeAmount = 123.0,
#' RecurringChargeFrequency = "string"
#' )
#' ),
#' ReservedNodeOfferingType = "Regular"|"Upgradable"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$get_reserved_node_exchange_offerings(
#' ReservedNodeId = "string",
#' MaxRecords = 123,
#' Marker = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_get_reserved_node_exchange_offerings
redshift_get_reserved_node_exchange_offerings <- function(ReservedNodeId, MaxRecords = NULL, Marker = NULL) {
  # Describe the GetReservedNodeExchangeOfferings API call.
  operation <- new_operation(
    name = "GetReservedNodeExchangeOfferings",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize arguments into the request shape; capture the output shape.
  request_body <- .redshift$get_reserved_node_exchange_offerings_input(ReservedNodeId = ReservedNodeId, MaxRecords = MaxRecords, Marker = Marker)
  response_shape <- .redshift$get_reserved_node_exchange_offerings_output()
  # Construct the client from the current configuration and send.
  service <- .redshift$service(get_config())
  request <- new_request(service, operation, request_body, response_shape)
  send_request(request)
}
.redshift$operations$get_reserved_node_exchange_offerings <- redshift_get_reserved_node_exchange_offerings
#' Modifies the settings for a cluster
#'
#' @description
#' Modifies the settings for a cluster.
#'
#' You can also change node type and the number of nodes to scale up or
#' down the cluster. When resizing a cluster, you must specify both the
#' number of nodes and the node type even if one of the parameters does not
#' change.
#'
#' You can add another security or parameter group, or change the master
#' user password. Resetting a cluster password or modifying the security
#' groups associated with a cluster do not need a reboot. However,
#' modifying a parameter group requires a reboot for parameters to take
#' effect. For more information about managing clusters, go to [Amazon
#' Redshift
#' Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_modify_cluster(ClusterIdentifier, ClusterType, NodeType,
#' NumberOfNodes, ClusterSecurityGroups, VpcSecurityGroupIds,
#' MasterUserPassword, ClusterParameterGroupName,
#' AutomatedSnapshotRetentionPeriod, ManualSnapshotRetentionPeriod,
#' PreferredMaintenanceWindow, ClusterVersion, AllowVersionUpgrade,
#' HsmClientCertificateIdentifier, HsmConfigurationIdentifier,
#' NewClusterIdentifier, PubliclyAccessible, ElasticIp, EnhancedVpcRouting,
#' MaintenanceTrackName, Encrypted, KmsKeyId, AvailabilityZoneRelocation,
#' AvailabilityZone, Port)
#'
#' @param ClusterIdentifier [required] The unique identifier of the cluster to be modified.
#'
#' Example: `examplecluster`
#' @param ClusterType The new cluster type.
#'
#' When you submit your cluster resize request, your existing cluster goes
#' into a read-only mode. After Amazon Redshift provisions a new cluster
#' based on your resize requirements, there will be outage for a period
#' while the old cluster is deleted and your connection is switched to the
#' new cluster. You can use [`describe_resize`][redshift_describe_resize]
#' to track the progress of the resize request.
#'
#' Valid Values: ` multi-node | single-node `
#' @param NodeType The new node type of the cluster. If you specify a new node type, you
#' must also specify the number of nodes parameter.
#'
#' For more information about resizing clusters, go to [Resizing Clusters
#' in Amazon
#' Redshift](https://docs.aws.amazon.com/redshift/latest/mgmt/managing-cluster-operations.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' Valid Values: `ds2.xlarge` | `ds2.8xlarge` | `dc1.large` | `dc1.8xlarge`
#' | `dc2.large` | `dc2.8xlarge` | `ra3.xlplus` | `ra3.4xlarge` |
#' `ra3.16xlarge`
#' @param NumberOfNodes The new number of nodes of the cluster. If you specify a new number of
#' nodes, you must also specify the node type parameter.
#'
#' For more information about resizing clusters, go to [Resizing Clusters
#' in Amazon
#' Redshift](https://docs.aws.amazon.com/redshift/latest/mgmt/managing-cluster-operations.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' Valid Values: Integer greater than `0`.
#' @param ClusterSecurityGroups A list of cluster security groups to be authorized on this cluster. This
#' change is asynchronously applied as soon as possible.
#'
#' Security groups currently associated with the cluster, and not in the
#' list of groups to apply, will be revoked from the cluster.
#'
#' Constraints:
#'
#' - Must be 1 to 255 alphanumeric characters or hyphens
#'
#' - First character must be a letter
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens
#' @param VpcSecurityGroupIds A list of virtual private cloud (VPC) security groups to be associated
#' with the cluster. This change is asynchronously applied as soon as
#' possible.
#' @param MasterUserPassword The new password for the cluster master user. This change is
#' asynchronously applied as soon as possible. Between the time of the
#' request and the completion of the request, the `MasterUserPassword`
#' element exists in the `PendingModifiedValues` element of the operation
#' response.
#'
#' Operations never return the password, so this operation provides a way
#' to regain access to the master user account for a cluster if the
#' password is lost.
#'
#' Default: Uses existing setting.
#'
#' Constraints:
#'
#' - Must be between 8 and 64 characters in length.
#'
#' - Must contain at least one uppercase letter.
#'
#' - Must contain at least one lowercase letter.
#'
#' - Must contain one number.
#'
#' - Can be any printable ASCII character (ASCII code 33 to 126) except '
#' (single quote), " (double quote), \\, /, @@, or space.
#' @param ClusterParameterGroupName The name of the cluster parameter group to apply to this cluster. This
#' change is applied only after the cluster is rebooted. To reboot a
#' cluster use [`reboot_cluster`][redshift_reboot_cluster].
#'
#' Default: Uses existing setting.
#'
#' Constraints: The cluster parameter group must be in the same parameter
#' group family that matches the cluster version.
#' @param AutomatedSnapshotRetentionPeriod The number of days that automated snapshots are retained. If the value
#' is 0, automated snapshots are disabled. Even if automated snapshots are
#' disabled, you can still create manual snapshots when you want with
#' [`create_cluster_snapshot`][redshift_create_cluster_snapshot].
#'
#' If you decrease the automated snapshot retention period from its current
#' value, existing automated snapshots that fall outside of the new
#' retention period will be immediately deleted.
#'
#' Default: Uses existing setting.
#'
#' Constraints: Must be a value from 0 to 35.
#' @param ManualSnapshotRetentionPeriod The default for number of days that a newly created manual snapshot is
#' retained. If the value is -1, the manual snapshot is retained
#' indefinitely. This value doesn't retroactively change the retention
#' periods of existing manual snapshots.
#'
#' The value must be either -1 or an integer between 1 and 3,653.
#'
#' The default value is -1.
#' @param PreferredMaintenanceWindow The weekly time range (in UTC) during which system maintenance can
#' occur, if necessary. If system maintenance is necessary during the
#' window, it may result in an outage.
#'
#' This maintenance window change is made immediately. If the new
#' maintenance window indicates the current time, there must be at least
#' 120 minutes between the current time and end of the window in order to
#' ensure that pending changes are applied.
#'
#' Default: Uses existing setting.
#'
#' Format: ddd:hh24:mi-ddd:hh24:mi, for example `wed:07:30-wed:08:00`.
#'
#' Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
#'
#' Constraints: Must be at least 30 minutes.
#' @param ClusterVersion The new version number of the Amazon Redshift engine to upgrade to.
#'
#' For major version upgrades, if a non-default cluster parameter group is
#' currently in use, a new cluster parameter group in the cluster parameter
#' group family for the new version must be specified. The new cluster
#' parameter group can be the default for that cluster parameter group
#' family. For more information about parameters and parameter groups, go
#' to [Amazon Redshift Parameter
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' Example: `1.0`
#' @param AllowVersionUpgrade If `true`, major version upgrades will be applied automatically to the
#' cluster during the maintenance window.
#'
#' Default: `false`
#' @param HsmClientCertificateIdentifier Specifies the name of the HSM client certificate the Amazon Redshift
#' cluster uses to retrieve the data encryption keys stored in an HSM.
#' @param HsmConfigurationIdentifier Specifies the name of the HSM configuration that contains the
#' information the Amazon Redshift cluster can use to retrieve and store
#' keys in an HSM.
#' @param NewClusterIdentifier The new identifier for the cluster.
#'
#' Constraints:
#'
#' - Must contain from 1 to 63 alphanumeric characters or hyphens.
#'
#' - Alphabetic characters must be lowercase.
#'
#' - First character must be a letter.
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens.
#'
#' - Must be unique for all clusters within an AWS account.
#'
#' Example: `examplecluster`
#' @param PubliclyAccessible If `true`, the cluster can be accessed from a public network. Only
#' clusters in VPCs can be set to be publicly available.
#' @param ElasticIp The Elastic IP (EIP) address for the cluster.
#'
#' Constraints: The cluster must be provisioned in EC2-VPC and
#' publicly-accessible through an Internet gateway. For more information
#' about provisioning clusters in EC2-VPC, go to [Supported Platforms to
#' Launch Your
#' Cluster](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#cluster-platforms)
#' in the Amazon Redshift Cluster Management Guide.
#' @param EnhancedVpcRouting An option that specifies whether to create the cluster with enhanced VPC
#' routing enabled. To create a cluster that uses enhanced VPC routing, the
#' cluster must be in a VPC. For more information, see [Enhanced VPC
#' Routing](https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html)
#' in the Amazon Redshift Cluster Management Guide.
#'
#' If this option is `true`, enhanced VPC routing is enabled.
#'
#' Default: false
#' @param MaintenanceTrackName The name for the maintenance track that you want to assign for the
#' cluster. This name change is asynchronous. The new track name stays in
#' the `PendingModifiedValues` for the cluster until the next maintenance
#' window. When the maintenance track changes, the cluster is switched to
#' the latest cluster release available for the maintenance track. At this
#' point, the maintenance track name is applied.
#' @param Encrypted Indicates whether the cluster is encrypted. If the value is encrypted
#' (true) and you provide a value for the `KmsKeyId` parameter, we encrypt
#' the cluster with the provided `KmsKeyId`. If you don't provide a
#' `KmsKeyId`, we encrypt with the default key.
#'
#' If the value is not encrypted (false), then the cluster is decrypted.
#' @param KmsKeyId The AWS Key Management Service (KMS) key ID of the encryption key that
#' you want to use to encrypt data in the cluster.
#' @param AvailabilityZoneRelocation The option to enable relocation for an Amazon Redshift cluster between
#' Availability Zones after the cluster modification is complete.
#' @param AvailabilityZone The option to initiate relocation for an Amazon Redshift cluster to the
#' target Availability Zone.
#' @param Port The option to change the port of an Amazon Redshift cluster.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_cluster(
#' ClusterIdentifier = "string",
#' ClusterType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterSecurityGroups = list(
#' "string"
#' ),
#' VpcSecurityGroupIds = list(
#' "string"
#' ),
#' MasterUserPassword = "string",
#' ClusterParameterGroupName = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' PreferredMaintenanceWindow = "string",
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' NewClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' ElasticIp = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' Encrypted = TRUE|FALSE,
#' KmsKeyId = "string",
#' AvailabilityZoneRelocation = TRUE|FALSE,
#' AvailabilityZone = "string",
#' Port = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_cluster
redshift_modify_cluster <- function(ClusterIdentifier, ClusterType = NULL, NodeType = NULL, NumberOfNodes = NULL, ClusterSecurityGroups = NULL, VpcSecurityGroupIds = NULL, MasterUserPassword = NULL, ClusterParameterGroupName = NULL, AutomatedSnapshotRetentionPeriod = NULL, ManualSnapshotRetentionPeriod = NULL, PreferredMaintenanceWindow = NULL, ClusterVersion = NULL, AllowVersionUpgrade = NULL, HsmClientCertificateIdentifier = NULL, HsmConfigurationIdentifier = NULL, NewClusterIdentifier = NULL, PubliclyAccessible = NULL, ElasticIp = NULL, EnhancedVpcRouting = NULL, MaintenanceTrackName = NULL, Encrypted = NULL, KmsKeyId = NULL, AvailabilityZoneRelocation = NULL, AvailabilityZone = NULL, Port = NULL) {
  # Describe the ModifyCluster API call (query-style POST to "/").
  operation <- new_operation(
    name = "ModifyCluster",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize every argument into the request shape; capture the output shape.
  request_body <- .redshift$modify_cluster_input(ClusterIdentifier = ClusterIdentifier, ClusterType = ClusterType, NodeType = NodeType, NumberOfNodes = NumberOfNodes, ClusterSecurityGroups = ClusterSecurityGroups, VpcSecurityGroupIds = VpcSecurityGroupIds, MasterUserPassword = MasterUserPassword, ClusterParameterGroupName = ClusterParameterGroupName, AutomatedSnapshotRetentionPeriod = AutomatedSnapshotRetentionPeriod, ManualSnapshotRetentionPeriod = ManualSnapshotRetentionPeriod, PreferredMaintenanceWindow = PreferredMaintenanceWindow, ClusterVersion = ClusterVersion, AllowVersionUpgrade = AllowVersionUpgrade, HsmClientCertificateIdentifier = HsmClientCertificateIdentifier, HsmConfigurationIdentifier = HsmConfigurationIdentifier, NewClusterIdentifier = NewClusterIdentifier, PubliclyAccessible = PubliclyAccessible, ElasticIp = ElasticIp, EnhancedVpcRouting = EnhancedVpcRouting, MaintenanceTrackName = MaintenanceTrackName, Encrypted = Encrypted, KmsKeyId = KmsKeyId, AvailabilityZoneRelocation = AvailabilityZoneRelocation, AvailabilityZone = AvailabilityZone, Port = Port)
  response_shape <- .redshift$modify_cluster_output()
  # Construct the client from the current configuration and send.
  service <- .redshift$service(get_config())
  request <- new_request(service, operation, request_body, response_shape)
  send_request(request)
}
.redshift$operations$modify_cluster <- redshift_modify_cluster
#' Modifies the database revision of a cluster
#'
#' @description
#' Modifies the database revision of a cluster. The database revision is a
#' unique revision of the database running in a cluster.
#'
#' @usage
#' redshift_modify_cluster_db_revision(ClusterIdentifier, RevisionTarget)
#'
#' @param ClusterIdentifier [required] The unique identifier of a cluster whose database revision you want to
#' modify.
#'
#' Example: `examplecluster`
#' @param RevisionTarget [required] The identifier of the database revision. You can retrieve this value
#' from the response to the
#' [`describe_cluster_db_revisions`][redshift_describe_cluster_db_revisions]
#' request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_cluster_db_revision(
#' ClusterIdentifier = "string",
#' RevisionTarget = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_cluster_db_revision
redshift_modify_cluster_db_revision <- function(ClusterIdentifier, RevisionTarget) {
  # Describe the ModifyClusterDbRevision call: a POST to the service root.
  operation <- new_operation(
    name = "ModifyClusterDbRevision",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's parameters and prepare the empty response shape.
  request_params <- .redshift$modify_cluster_db_revision_input(
    ClusterIdentifier = ClusterIdentifier,
    RevisionTarget = RevisionTarget
  )
  response_shape <- .redshift$modify_cluster_db_revision_output()
  # Build a Redshift client from the current configuration and dispatch.
  client <- .redshift$service(get_config())
  reply <- send_request(new_request(client, operation, request_params, response_shape))
  reply
}
.redshift$operations$modify_cluster_db_revision <- redshift_modify_cluster_db_revision
#' Modifies the list of AWS Identity and Access Management (IAM) roles that
#' can be used by the cluster to access other AWS services
#'
#' @description
#' Modifies the list of AWS Identity and Access Management (IAM) roles that
#' can be used by the cluster to access other AWS services.
#'
#' A cluster can have up to 10 IAM roles associated at any time.
#'
#' @usage
#' redshift_modify_cluster_iam_roles(ClusterIdentifier, AddIamRoles,
#' RemoveIamRoles)
#'
#' @param ClusterIdentifier [required] The unique identifier of the cluster for which you want to associate or
#' disassociate IAM roles.
#' @param AddIamRoles Zero or more IAM roles to associate with the cluster. The roles must be
#' in their Amazon Resource Name (ARN) format. You can associate up to 10
#' IAM roles with a single cluster in a single request.
#' @param RemoveIamRoles Zero or more IAM roles in ARN format to disassociate from the cluster.
#' You can disassociate up to 10 IAM roles from a single cluster in a
#' single request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_cluster_iam_roles(
#' ClusterIdentifier = "string",
#' AddIamRoles = list(
#' "string"
#' ),
#' RemoveIamRoles = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_cluster_iam_roles
redshift_modify_cluster_iam_roles <- function(ClusterIdentifier, AddIamRoles = NULL, RemoveIamRoles = NULL) {
  # Describe the ModifyClusterIamRoles call: a POST to the service root.
  operation <- new_operation(
    name = "ModifyClusterIamRoles",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's parameters and prepare the empty response shape.
  request_params <- .redshift$modify_cluster_iam_roles_input(
    ClusterIdentifier = ClusterIdentifier,
    AddIamRoles = AddIamRoles,
    RemoveIamRoles = RemoveIamRoles
  )
  response_shape <- .redshift$modify_cluster_iam_roles_output()
  # Build a Redshift client from the current configuration and dispatch.
  client <- .redshift$service(get_config())
  reply <- send_request(new_request(client, operation, request_params, response_shape))
  reply
}
.redshift$operations$modify_cluster_iam_roles <- redshift_modify_cluster_iam_roles
#' Modifies the maintenance settings of a cluster
#'
#' @description
#' Modifies the maintenance settings of a cluster.
#'
#' @usage
#' redshift_modify_cluster_maintenance(ClusterIdentifier, DeferMaintenance,
#' DeferMaintenanceIdentifier, DeferMaintenanceStartTime,
#' DeferMaintenanceEndTime, DeferMaintenanceDuration)
#'
#' @param ClusterIdentifier [required] A unique identifier for the cluster.
#' @param DeferMaintenance A boolean indicating whether to enable the deferred maintenance window.
#' @param DeferMaintenanceIdentifier A unique identifier for the deferred maintenance window.
#' @param DeferMaintenanceStartTime A timestamp indicating the start time for the deferred maintenance
#' window.
#' @param DeferMaintenanceEndTime A timestamp indicating end time for the deferred maintenance window. If
#' you specify an end time, you can't specify a duration.
#' @param DeferMaintenanceDuration An integer indicating the duration of the maintenance window in days. If
#' you specify a duration, you can't specify an end time. The duration must
#' be 45 days or less.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_cluster_maintenance(
#' ClusterIdentifier = "string",
#' DeferMaintenance = TRUE|FALSE,
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceDuration = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_cluster_maintenance
redshift_modify_cluster_maintenance <- function(ClusterIdentifier, DeferMaintenance = NULL, DeferMaintenanceIdentifier = NULL, DeferMaintenanceStartTime = NULL, DeferMaintenanceEndTime = NULL, DeferMaintenanceDuration = NULL) {
  # Describe the ModifyClusterMaintenance call: a POST to the service root.
  operation <- new_operation(
    name = "ModifyClusterMaintenance",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's parameters and prepare the empty response shape.
  request_params <- .redshift$modify_cluster_maintenance_input(
    ClusterIdentifier = ClusterIdentifier,
    DeferMaintenance = DeferMaintenance,
    DeferMaintenanceIdentifier = DeferMaintenanceIdentifier,
    DeferMaintenanceStartTime = DeferMaintenanceStartTime,
    DeferMaintenanceEndTime = DeferMaintenanceEndTime,
    DeferMaintenanceDuration = DeferMaintenanceDuration
  )
  response_shape <- .redshift$modify_cluster_maintenance_output()
  # Build a Redshift client from the current configuration and dispatch.
  client <- .redshift$service(get_config())
  reply <- send_request(new_request(client, operation, request_params, response_shape))
  reply
}
.redshift$operations$modify_cluster_maintenance <- redshift_modify_cluster_maintenance
#' Modifies the parameters of a parameter group
#'
#' @description
#' Modifies the parameters of a parameter group.
#'
#' For more information about parameters and parameter groups, go to
#' [Amazon Redshift Parameter
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_modify_cluster_parameter_group(ParameterGroupName, Parameters)
#'
#' @param ParameterGroupName [required] The name of the parameter group to be modified.
#' @param Parameters [required] An array of parameters to be modified. A maximum of 20 parameters can be
#' modified in a single request.
#'
#' For each parameter to be modified, you must supply at least the
#' parameter name and parameter value; other name-value pairs of the
#' parameter are optional.
#'
#' For the workload management (WLM) configuration, you must supply all the
#' name-value pairs in the wlm_json_configuration parameter.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ParameterGroupName = "string",
#' ParameterGroupStatus = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_cluster_parameter_group(
#' ParameterGroupName = "string",
#' Parameters = list(
#' list(
#' ParameterName = "string",
#' ParameterValue = "string",
#' Description = "string",
#' Source = "string",
#' DataType = "string",
#' AllowedValues = "string",
#' ApplyType = "static"|"dynamic",
#' IsModifiable = TRUE|FALSE,
#' MinimumEngineVersion = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_cluster_parameter_group
redshift_modify_cluster_parameter_group <- function(ParameterGroupName, Parameters) {
  # Describe the ModifyClusterParameterGroup call: a POST to the service root.
  operation <- new_operation(
    name = "ModifyClusterParameterGroup",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's parameters and prepare the empty response shape.
  request_params <- .redshift$modify_cluster_parameter_group_input(
    ParameterGroupName = ParameterGroupName,
    Parameters = Parameters
  )
  response_shape <- .redshift$modify_cluster_parameter_group_output()
  # Build a Redshift client from the current configuration and dispatch.
  client <- .redshift$service(get_config())
  reply <- send_request(new_request(client, operation, request_params, response_shape))
  reply
}
.redshift$operations$modify_cluster_parameter_group <- redshift_modify_cluster_parameter_group
#' Modifies the settings for a snapshot
#'
#' @description
#' Modifies the settings for a snapshot.
#'
#' This example modifies the manual retention period setting for a cluster
#' snapshot.
#'
#' @usage
#' redshift_modify_cluster_snapshot(SnapshotIdentifier,
#' ManualSnapshotRetentionPeriod, Force)
#'
#' @param SnapshotIdentifier [required] The identifier of the snapshot whose setting you want to modify.
#' @param ManualSnapshotRetentionPeriod The number of days that a manual snapshot is retained. If the value is
#' -1, the manual snapshot is retained indefinitely.
#'
#' If the manual snapshot falls outside of the new retention period, you
#' can specify the force option to immediately delete the snapshot.
#'
#' The value must be either -1 or an integer between 1 and 3,653.
#' @param Force A Boolean option to override an exception if the retention period has
#' already passed.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Snapshot = list(
#' SnapshotIdentifier = "string",
#' ClusterIdentifier = "string",
#' SnapshotCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Status = "string",
#' Port = 123,
#' AvailabilityZone = "string",
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' MasterUsername = "string",
#' ClusterVersion = "string",
#' EngineFullVersion = "string",
#' SnapshotType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' DBName = "string",
#' VpcId = "string",
#' Encrypted = TRUE|FALSE,
#' KmsKeyId = "string",
#' EncryptedWithHSM = TRUE|FALSE,
#' AccountsWithRestoreAccess = list(
#' list(
#' AccountId = "string",
#' AccountAlias = "string"
#' )
#' ),
#' OwnerAccount = "string",
#' TotalBackupSizeInMegaBytes = 123.0,
#' ActualIncrementalBackupSizeInMegaBytes = 123.0,
#' BackupProgressInMegaBytes = 123.0,
#' CurrentBackupRateInMegaBytesPerSecond = 123.0,
#' EstimatedSecondsToCompletion = 123,
#' ElapsedTimeInSeconds = 123,
#' SourceRegion = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' RestorableNodeTypes = list(
#' "string"
#' ),
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' ManualSnapshotRetentionPeriod = 123,
#' ManualSnapshotRemainingDays = 123,
#' SnapshotRetentionStartTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_cluster_snapshot(
#' SnapshotIdentifier = "string",
#' ManualSnapshotRetentionPeriod = 123,
#' Force = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_cluster_snapshot
redshift_modify_cluster_snapshot <- function(SnapshotIdentifier, ManualSnapshotRetentionPeriod = NULL, Force = NULL) {
  # Describe the ModifyClusterSnapshot call: a POST to the service root.
  operation <- new_operation(
    name = "ModifyClusterSnapshot",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's parameters and prepare the empty response shape.
  request_params <- .redshift$modify_cluster_snapshot_input(
    SnapshotIdentifier = SnapshotIdentifier,
    ManualSnapshotRetentionPeriod = ManualSnapshotRetentionPeriod,
    Force = Force
  )
  response_shape <- .redshift$modify_cluster_snapshot_output()
  # Build a Redshift client from the current configuration and dispatch.
  client <- .redshift$service(get_config())
  reply <- send_request(new_request(client, operation, request_params, response_shape))
  reply
}
.redshift$operations$modify_cluster_snapshot <- redshift_modify_cluster_snapshot
#' Modifies a snapshot schedule for a cluster
#'
#' @description
#' Modifies a snapshot schedule for a cluster.
#'
#' @usage
#' redshift_modify_cluster_snapshot_schedule(ClusterIdentifier,
#' ScheduleIdentifier, DisassociateSchedule)
#'
#' @param ClusterIdentifier [required] A unique identifier for the cluster whose snapshot schedule you want to
#' modify.
#' @param ScheduleIdentifier A unique alphanumeric identifier for the schedule that you want to
#' associate with the cluster.
#' @param DisassociateSchedule A boolean to indicate whether to remove the association between the
#' cluster and the schedule.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$modify_cluster_snapshot_schedule(
#' ClusterIdentifier = "string",
#' ScheduleIdentifier = "string",
#' DisassociateSchedule = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_cluster_snapshot_schedule
redshift_modify_cluster_snapshot_schedule <- function(ClusterIdentifier, ScheduleIdentifier = NULL, DisassociateSchedule = NULL) {
  # Describe the ModifyClusterSnapshotSchedule call: a POST to the service root.
  operation <- new_operation(
    name = "ModifyClusterSnapshotSchedule",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's parameters and prepare the empty response shape.
  request_params <- .redshift$modify_cluster_snapshot_schedule_input(
    ClusterIdentifier = ClusterIdentifier,
    ScheduleIdentifier = ScheduleIdentifier,
    DisassociateSchedule = DisassociateSchedule
  )
  response_shape <- .redshift$modify_cluster_snapshot_schedule_output()
  # Build a Redshift client from the current configuration and dispatch.
  client <- .redshift$service(get_config())
  reply <- send_request(new_request(client, operation, request_params, response_shape))
  reply
}
.redshift$operations$modify_cluster_snapshot_schedule <- redshift_modify_cluster_snapshot_schedule
#' Modifies a cluster subnet group to include the specified list of VPC
#' subnets
#'
#' @description
#' Modifies a cluster subnet group to include the specified list of VPC
#' subnets. The operation replaces the existing list of subnets with the
#' new list of subnets.
#'
#' @usage
#' redshift_modify_cluster_subnet_group(ClusterSubnetGroupName,
#' Description, SubnetIds)
#'
#' @param ClusterSubnetGroupName [required] The name of the subnet group to be modified.
#' @param Description A text description of the subnet group to be modified.
#' @param SubnetIds [required] An array of VPC subnet IDs. A maximum of 20 subnets can be modified in a
#' single request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ClusterSubnetGroup = list(
#' ClusterSubnetGroupName = "string",
#' Description = "string",
#' VpcId = "string",
#' SubnetGroupStatus = "string",
#' Subnets = list(
#' list(
#' SubnetIdentifier = "string",
#' SubnetAvailabilityZone = list(
#' Name = "string",
#' SupportedPlatforms = list(
#' list(
#' Name = "string"
#' )
#' )
#' ),
#' SubnetStatus = "string"
#' )
#' ),
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_cluster_subnet_group(
#' ClusterSubnetGroupName = "string",
#' Description = "string",
#' SubnetIds = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_cluster_subnet_group
redshift_modify_cluster_subnet_group <- function(ClusterSubnetGroupName, Description = NULL, SubnetIds) {
  # Describe the ModifyClusterSubnetGroup call: a POST to the service root.
  operation <- new_operation(
    name = "ModifyClusterSubnetGroup",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's parameters and prepare the empty response shape.
  request_params <- .redshift$modify_cluster_subnet_group_input(
    ClusterSubnetGroupName = ClusterSubnetGroupName,
    Description = Description,
    SubnetIds = SubnetIds
  )
  response_shape <- .redshift$modify_cluster_subnet_group_output()
  # Build a Redshift client from the current configuration and dispatch.
  client <- .redshift$service(get_config())
  reply <- send_request(new_request(client, operation, request_params, response_shape))
  reply
}
.redshift$operations$modify_cluster_subnet_group <- redshift_modify_cluster_subnet_group
#' Modifies an existing Amazon Redshift event notification subscription
#'
#' @description
#' Modifies an existing Amazon Redshift event notification subscription.
#'
#' @usage
#' redshift_modify_event_subscription(SubscriptionName, SnsTopicArn,
#' SourceType, SourceIds, EventCategories, Severity, Enabled)
#'
#' @param SubscriptionName [required] The name of the modified Amazon Redshift event notification
#' subscription.
#' @param SnsTopicArn The Amazon Resource Name (ARN) of the SNS topic to be used by the event
#' notification subscription.
#' @param SourceType The type of source that will be generating the events. For example, if
#' you want to be notified of events generated by a cluster, you would set
#' this parameter to cluster. If this value is not specified, events are
#' returned for all Amazon Redshift objects in your AWS account. You must
#' specify a source type in order to specify source IDs.
#'
#' Valid values: cluster, cluster-parameter-group, cluster-security-group,
#' cluster-snapshot, and scheduled-action.
#' @param SourceIds A list of one or more identifiers of Amazon Redshift source objects. All
#' of the objects must be of the same type as was specified in the source
#' type parameter. The event subscription will return only events generated
#' by the specified objects. If not specified, then events are returned for
#' all objects within the source type specified.
#'
#' Example: my-cluster-1, my-cluster-2
#'
#' Example: my-snapshot-20131010
#' @param EventCategories Specifies the Amazon Redshift event categories to be published by the
#' event notification subscription.
#'
#' Values: configuration, management, monitoring, security
#' @param Severity Specifies the Amazon Redshift event severity to be published by the
#' event notification subscription.
#'
#' Values: ERROR, INFO
#' @param Enabled A Boolean value indicating if the subscription is enabled. `true`
#' indicates the subscription is enabled
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' EventSubscription = list(
#' CustomerAwsId = "string",
#' CustSubscriptionId = "string",
#' SnsTopicArn = "string",
#' Status = "string",
#' SubscriptionCreationTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' SourceType = "string",
#' SourceIdsList = list(
#' "string"
#' ),
#' EventCategoriesList = list(
#' "string"
#' ),
#' Severity = "string",
#' Enabled = TRUE|FALSE,
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_event_subscription(
#' SubscriptionName = "string",
#' SnsTopicArn = "string",
#' SourceType = "string",
#' SourceIds = list(
#' "string"
#' ),
#' EventCategories = list(
#' "string"
#' ),
#' Severity = "string",
#' Enabled = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_event_subscription
redshift_modify_event_subscription <- function(SubscriptionName, SnsTopicArn = NULL, SourceType = NULL, SourceIds = NULL, EventCategories = NULL, Severity = NULL, Enabled = NULL) {
  # Describe the ModifyEventSubscription call: a POST to the service root.
  operation <- new_operation(
    name = "ModifyEventSubscription",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's parameters and prepare the empty response shape.
  request_params <- .redshift$modify_event_subscription_input(
    SubscriptionName = SubscriptionName,
    SnsTopicArn = SnsTopicArn,
    SourceType = SourceType,
    SourceIds = SourceIds,
    EventCategories = EventCategories,
    Severity = Severity,
    Enabled = Enabled
  )
  response_shape <- .redshift$modify_event_subscription_output()
  # Build a Redshift client from the current configuration and dispatch.
  client <- .redshift$service(get_config())
  reply <- send_request(new_request(client, operation, request_params, response_shape))
  reply
}
.redshift$operations$modify_event_subscription <- redshift_modify_event_subscription
#' Modifies a scheduled action
#'
#' @description
#' Modifies a scheduled action.
#'
#' @usage
#' redshift_modify_scheduled_action(ScheduledActionName, TargetAction,
#' Schedule, IamRole, ScheduledActionDescription, StartTime, EndTime,
#' Enable)
#'
#' @param ScheduledActionName [required] The name of the scheduled action to modify.
#' @param TargetAction A modified JSON format of the scheduled action. For more information
#' about this parameter, see ScheduledAction.
#' @param Schedule A modified schedule in either `at( )` or `cron( )` format. For more
#' information about this parameter, see ScheduledAction.
#' @param IamRole A different IAM role to assume to run the target action. For more
#' information about this parameter, see ScheduledAction.
#' @param ScheduledActionDescription A modified description of the scheduled action.
#' @param StartTime A modified start time of the scheduled action. For more information
#' about this parameter, see ScheduledAction.
#' @param EndTime A modified end time of the scheduled action. For more information about
#' this parameter, see ScheduledAction.
#' @param Enable A modified enable flag of the scheduled action. If true, the scheduled
#' action is active. If false, the scheduled action is disabled.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ScheduledActionName = "string",
#' TargetAction = list(
#' ResizeCluster = list(
#' ClusterIdentifier = "string",
#' ClusterType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' Classic = TRUE|FALSE
#' ),
#' PauseCluster = list(
#' ClusterIdentifier = "string"
#' ),
#' ResumeCluster = list(
#' ClusterIdentifier = "string"
#' )
#' ),
#' Schedule = "string",
#' IamRole = "string",
#' ScheduledActionDescription = "string",
#' State = "ACTIVE"|"DISABLED",
#' NextInvocations = list(
#' as.POSIXct(
#' "2015-01-01"
#' )
#' ),
#' StartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' EndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_scheduled_action(
#' ScheduledActionName = "string",
#' TargetAction = list(
#' ResizeCluster = list(
#' ClusterIdentifier = "string",
#' ClusterType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' Classic = TRUE|FALSE
#' ),
#' PauseCluster = list(
#' ClusterIdentifier = "string"
#' ),
#' ResumeCluster = list(
#' ClusterIdentifier = "string"
#' )
#' ),
#' Schedule = "string",
#' IamRole = "string",
#' ScheduledActionDescription = "string",
#' StartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' EndTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Enable = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_scheduled_action
redshift_modify_scheduled_action <- function(ScheduledActionName, TargetAction = NULL, Schedule = NULL, IamRole = NULL, ScheduledActionDescription = NULL, StartTime = NULL, EndTime = NULL, Enable = NULL) {
  # Describe the ModifyScheduledAction call: a POST to the service root.
  operation <- new_operation(
    name = "ModifyScheduledAction",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's parameters and prepare the empty response shape.
  request_params <- .redshift$modify_scheduled_action_input(
    ScheduledActionName = ScheduledActionName,
    TargetAction = TargetAction,
    Schedule = Schedule,
    IamRole = IamRole,
    ScheduledActionDescription = ScheduledActionDescription,
    StartTime = StartTime,
    EndTime = EndTime,
    Enable = Enable
  )
  response_shape <- .redshift$modify_scheduled_action_output()
  # Build a Redshift client from the current configuration and dispatch.
  client <- .redshift$service(get_config())
  reply <- send_request(new_request(client, operation, request_params, response_shape))
  reply
}
.redshift$operations$modify_scheduled_action <- redshift_modify_scheduled_action
#' Modifies the number of days to retain snapshots in the destination AWS
#' Region after they are copied from the source AWS Region
#'
#' @description
#' Modifies the number of days to retain snapshots in the destination AWS
#' Region after they are copied from the source AWS Region. By default,
#' this operation only changes the retention period of copied automated
#' snapshots. The retention periods for both new and existing copied
#' automated snapshots are updated with the new retention period. You can
#' set the manual option to change only the retention periods of copied
#' manual snapshots. If you set this option, only newly copied manual
#' snapshots have the new retention period.
#'
#' @usage
#' redshift_modify_snapshot_copy_retention_period(ClusterIdentifier,
#' RetentionPeriod, Manual)
#'
#' @param ClusterIdentifier [required] The unique identifier of the cluster for which you want to change the
#' retention period for either automated or manual snapshots that are
#' copied to a destination AWS Region.
#'
#' Constraints: Must be the valid name of an existing cluster that has
#' cross-region snapshot copy enabled.
#' @param RetentionPeriod [required] The number of days to retain automated snapshots in the destination AWS
#' Region after they are copied from the source AWS Region.
#'
#' By default, this only changes the retention period of copied automated
#' snapshots.
#'
#' If you decrease the retention period for automated snapshots that are
#' copied to a destination AWS Region, Amazon Redshift deletes any existing
#' automated snapshots that were copied to the destination AWS Region and
#' that fall outside of the new retention period.
#'
#' Constraints: Must be at least 1 and no more than 35 for automated
#' snapshots.
#'
#' If you specify the `manual` option, only newly copied manual snapshots
#' will have the new retention period.
#'
#' If you specify the value of -1 newly copied manual snapshots are
#' retained indefinitely.
#'
#' Constraints: The number of days must be either -1 or an integer between
#' 1 and 3,653 for manual snapshots.
#' @param Manual Indicates whether to apply the snapshot retention period to newly copied
#' manual snapshots instead of automated snapshots.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_snapshot_copy_retention_period(
#' ClusterIdentifier = "string",
#' RetentionPeriod = 123,
#' Manual = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_snapshot_copy_retention_period
redshift_modify_snapshot_copy_retention_period <- function(ClusterIdentifier, RetentionPeriod, Manual = NULL) {
  # Describe the ModifySnapshotCopyRetentionPeriod operation.
  operation <- new_operation(
    name = "ModifySnapshotCopyRetentionPeriod",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments into the service input/output shapes.
  op_input <- .redshift$modify_snapshot_copy_retention_period_input(ClusterIdentifier = ClusterIdentifier, RetentionPeriod = RetentionPeriod, Manual = Manual)
  op_output <- .redshift$modify_snapshot_copy_retention_period_output()
  # Build a client from the active configuration, then issue the request.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.redshift$operations$modify_snapshot_copy_retention_period <- redshift_modify_snapshot_copy_retention_period
#' Modifies a snapshot schedule
#'
#' @description
#' Modifies a snapshot schedule. Any schedule associated with a cluster is
#' modified asynchronously.
#'
#' @usage
#' redshift_modify_snapshot_schedule(ScheduleIdentifier,
#' ScheduleDefinitions)
#'
#' @param ScheduleIdentifier [required] A unique alphanumeric identifier of the schedule to modify.
#' @param ScheduleDefinitions [required] An updated list of schedule definitions. A schedule definition is made
#' up of schedule expressions, for example, "cron(30 12 *)" or "rate(12
#' hours)".
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ScheduleDefinitions = list(
#' "string"
#' ),
#' ScheduleIdentifier = "string",
#' ScheduleDescription = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' NextInvocations = list(
#' as.POSIXct(
#' "2015-01-01"
#' )
#' ),
#' AssociatedClusterCount = 123,
#' AssociatedClusters = list(
#' list(
#' ClusterIdentifier = "string",
#' ScheduleAssociationState = "MODIFYING"|"ACTIVE"|"FAILED"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_snapshot_schedule(
#' ScheduleIdentifier = "string",
#' ScheduleDefinitions = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_snapshot_schedule
redshift_modify_snapshot_schedule <- function(ScheduleIdentifier, ScheduleDefinitions) {
  # Describe the ModifySnapshotSchedule operation.
  operation <- new_operation(
    name = "ModifySnapshotSchedule",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments into the service input/output shapes.
  op_input <- .redshift$modify_snapshot_schedule_input(ScheduleIdentifier = ScheduleIdentifier, ScheduleDefinitions = ScheduleDefinitions)
  op_output <- .redshift$modify_snapshot_schedule_output()
  # Build a client from the active configuration, then issue the request.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.redshift$operations$modify_snapshot_schedule <- redshift_modify_snapshot_schedule
#' Modifies a usage limit in a cluster
#'
#' @description
#' Modifies a usage limit in a cluster. You can't modify the feature type
#' or period of a usage limit.
#'
#' @usage
#' redshift_modify_usage_limit(UsageLimitId, Amount, BreachAction)
#'
#' @param UsageLimitId [required] The identifier of the usage limit to modify.
#' @param Amount The new limit amount. For more information about this parameter, see
#' UsageLimit.
#' @param BreachAction The new action that Amazon Redshift takes when the limit is reached. For
#' more information about this parameter, see UsageLimit.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' UsageLimitId = "string",
#' ClusterIdentifier = "string",
#' FeatureType = "spectrum"|"concurrency-scaling",
#' LimitType = "time"|"data-scanned",
#' Amount = 123,
#' Period = "daily"|"weekly"|"monthly",
#' BreachAction = "log"|"emit-metric"|"disable",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_usage_limit(
#' UsageLimitId = "string",
#' Amount = 123,
#' BreachAction = "log"|"emit-metric"|"disable"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_usage_limit
redshift_modify_usage_limit <- function(UsageLimitId, Amount = NULL, BreachAction = NULL) {
  # Describe the ModifyUsageLimit operation.
  operation <- new_operation(
    name = "ModifyUsageLimit",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments into the service input/output shapes.
  op_input <- .redshift$modify_usage_limit_input(UsageLimitId = UsageLimitId, Amount = Amount, BreachAction = BreachAction)
  op_output <- .redshift$modify_usage_limit_output()
  # Build a client from the active configuration, then issue the request.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.redshift$operations$modify_usage_limit <- redshift_modify_usage_limit
#' Pauses a cluster
#'
#' @description
#' Pauses a cluster.
#'
#' @usage
#' redshift_pause_cluster(ClusterIdentifier)
#'
#' @param ClusterIdentifier [required] The identifier of the cluster to be paused.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$pause_cluster(
#' ClusterIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_pause_cluster
redshift_pause_cluster <- function(ClusterIdentifier) {
  # Describe the PauseCluster operation.
  operation <- new_operation(
    name = "PauseCluster",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments into the service input/output shapes.
  op_input <- .redshift$pause_cluster_input(ClusterIdentifier = ClusterIdentifier)
  op_output <- .redshift$pause_cluster_output()
  # Build a client from the active configuration, then issue the request.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.redshift$operations$pause_cluster <- redshift_pause_cluster
#' Allows you to purchase reserved nodes
#'
#' @description
#' Allows you to purchase reserved nodes. Amazon Redshift offers a
#' predefined set of reserved node offerings. You can purchase one or more
#' of the offerings. You can call the
#' [`describe_reserved_node_offerings`][redshift_describe_reserved_node_offerings]
#' API to obtain the available reserved node offerings. You can call this
#' API by providing a specific reserved node offering and the number of
#' nodes you want to reserve.
#'
#' For more information about reserved node offerings, go to [Purchasing
#' Reserved
#' Nodes](https://docs.aws.amazon.com/redshift/latest/mgmt/purchase-reserved-node-instance.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_purchase_reserved_node_offering(ReservedNodeOfferingId,
#' NodeCount)
#'
#' @param ReservedNodeOfferingId [required] The unique identifier of the reserved node offering you want to
#' purchase.
#' @param NodeCount The number of reserved nodes that you want to purchase.
#'
#' Default: `1`
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ReservedNode = list(
#' ReservedNodeId = "string",
#' ReservedNodeOfferingId = "string",
#' NodeType = "string",
#' StartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Duration = 123,
#' FixedPrice = 123.0,
#' UsagePrice = 123.0,
#' CurrencyCode = "string",
#' NodeCount = 123,
#' State = "string",
#' OfferingType = "string",
#' RecurringCharges = list(
#' list(
#' RecurringChargeAmount = 123.0,
#' RecurringChargeFrequency = "string"
#' )
#' ),
#' ReservedNodeOfferingType = "Regular"|"Upgradable"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$purchase_reserved_node_offering(
#' ReservedNodeOfferingId = "string",
#' NodeCount = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_purchase_reserved_node_offering
redshift_purchase_reserved_node_offering <- function(ReservedNodeOfferingId, NodeCount = NULL) {
  # Describe the PurchaseReservedNodeOffering operation.
  operation <- new_operation(
    name = "PurchaseReservedNodeOffering",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments into the service input/output shapes.
  op_input <- .redshift$purchase_reserved_node_offering_input(ReservedNodeOfferingId = ReservedNodeOfferingId, NodeCount = NodeCount)
  op_output <- .redshift$purchase_reserved_node_offering_output()
  # Build a client from the active configuration, then issue the request.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.redshift$operations$purchase_reserved_node_offering <- redshift_purchase_reserved_node_offering
#' Reboots a cluster
#'
#' @description
#' Reboots a cluster. This action is taken as soon as possible. It results
#' in a momentary outage to the cluster, during which the cluster status is
#' set to `rebooting`. A cluster event is created when the reboot is
#' completed. Any pending cluster modifications (see
#' [`modify_cluster`][redshift_modify_cluster]) are applied at this reboot.
#' For more information about managing clusters, go to [Amazon Redshift
#' Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_reboot_cluster(ClusterIdentifier)
#'
#' @param ClusterIdentifier [required] The cluster identifier.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$reboot_cluster(
#' ClusterIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_reboot_cluster
redshift_reboot_cluster <- function(ClusterIdentifier) {
  # Describe the RebootCluster operation.
  operation <- new_operation(
    name = "RebootCluster",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments into the service input/output shapes.
  op_input <- .redshift$reboot_cluster_input(ClusterIdentifier = ClusterIdentifier)
  op_output <- .redshift$reboot_cluster_output()
  # Build a client from the active configuration, then issue the request.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.redshift$operations$reboot_cluster <- redshift_reboot_cluster
#' Sets one or more parameters of the specified parameter group to their
#' default values and sets the source values of the parameters to
#' "engine-default"
#'
#' @description
#' Sets one or more parameters of the specified parameter group to their
#' default values and sets the source values of the parameters to
#' "engine-default". To reset the entire parameter group specify the
#' *ResetAllParameters* parameter. For parameter changes to take effect you
#' must reboot any associated clusters.
#'
#' @usage
#' redshift_reset_cluster_parameter_group(ParameterGroupName,
#' ResetAllParameters, Parameters)
#'
#' @param ParameterGroupName [required] The name of the cluster parameter group to be reset.
#' @param ResetAllParameters If `true`, all parameters in the specified parameter group will be reset
#' to their default values.
#'
#' Default: `true`
#' @param Parameters An array of names of parameters to be reset. If *ResetAllParameters*
#' option is not used, then at least one parameter name must be supplied.
#'
#' Constraints: A maximum of 20 parameters can be reset in a single
#' request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ParameterGroupName = "string",
#' ParameterGroupStatus = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$reset_cluster_parameter_group(
#' ParameterGroupName = "string",
#' ResetAllParameters = TRUE|FALSE,
#' Parameters = list(
#' list(
#' ParameterName = "string",
#' ParameterValue = "string",
#' Description = "string",
#' Source = "string",
#' DataType = "string",
#' AllowedValues = "string",
#' ApplyType = "static"|"dynamic",
#' IsModifiable = TRUE|FALSE,
#' MinimumEngineVersion = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_reset_cluster_parameter_group
redshift_reset_cluster_parameter_group <- function(ParameterGroupName, ResetAllParameters = NULL, Parameters = NULL) {
  # Describe the ResetClusterParameterGroup operation.
  operation <- new_operation(
    name = "ResetClusterParameterGroup",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments into the service input/output shapes.
  op_input <- .redshift$reset_cluster_parameter_group_input(ParameterGroupName = ParameterGroupName, ResetAllParameters = ResetAllParameters, Parameters = Parameters)
  op_output <- .redshift$reset_cluster_parameter_group_output()
  # Build a client from the active configuration, then issue the request.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.redshift$operations$reset_cluster_parameter_group <- redshift_reset_cluster_parameter_group
#' Changes the size of the cluster
#'
#' @description
#' Changes the size of the cluster. You can change the cluster's type, or
#' change the number or type of nodes. The default behavior is to use the
#' elastic resize method. With an elastic resize, your cluster is available
#' for read and write operations more quickly than with the classic resize
#' method.
#'
#' Elastic resize operations have the following restrictions:
#'
#' - You can only resize clusters of the following types:
#'
#' - dc1.large (if your cluster is in a VPC)
#'
#' - dc1.8xlarge (if your cluster is in a VPC)
#'
#' - dc2.large
#'
#' - dc2.8xlarge
#'
#' - ds2.xlarge
#'
#' - ds2.8xlarge
#'
#' - ra3.xlplus
#'
#' - ra3.4xlarge
#'
#' - ra3.16xlarge
#'
#' - The type of nodes that you add must match the node type for the
#' cluster.
#'
#' @usage
#' redshift_resize_cluster(ClusterIdentifier, ClusterType, NodeType,
#' NumberOfNodes, Classic)
#'
#' @param ClusterIdentifier [required] The unique identifier for the cluster to resize.
#' @param ClusterType The new cluster type for the specified cluster.
#' @param NodeType The new node type for the nodes you are adding. If not specified, the
#' cluster's current node type is used.
#' @param NumberOfNodes The new number of nodes for the cluster. If not specified, the cluster's
#' current number of nodes is used.
#' @param Classic A boolean value indicating whether the resize operation is using the
#' classic resize process. If you don't provide this parameter or set the
#' value to `false`, the resize type is elastic.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$resize_cluster(
#' ClusterIdentifier = "string",
#' ClusterType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' Classic = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_resize_cluster
redshift_resize_cluster <- function(ClusterIdentifier, ClusterType = NULL, NodeType = NULL, NumberOfNodes = NULL, Classic = NULL) {
  # Describe the ResizeCluster call against the Redshift query API endpoint.
  operation <- new_operation(
    name = "ResizeCluster",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied arguments into the request shape and
  # prepare the (empty) output shape used to deserialize the response.
  request_body <- .redshift$resize_cluster_input(
    ClusterIdentifier = ClusterIdentifier,
    ClusterType = ClusterType,
    NodeType = NodeType,
    NumberOfNodes = NumberOfNodes,
    Classic = Classic
  )
  response_shape <- .redshift$resize_cluster_output()
  # Build a service client from the active configuration and dispatch.
  svc <- .redshift$service(get_config())
  req <- new_request(svc, operation, request_body, response_shape)
  send_request(req)
}
.redshift$operations$resize_cluster <- redshift_resize_cluster
#' Creates a new cluster from a snapshot
#'
#' @description
#' Creates a new cluster from a snapshot. By default, Amazon Redshift
#' creates the resulting cluster with the same configuration as the
#' original cluster from which the snapshot was created, except that the
#' new cluster is created with the default cluster security and parameter
#' groups. After Amazon Redshift creates the cluster, you can use the
#' [`modify_cluster`][redshift_modify_cluster] API to associate a different
#' security group and different parameter group with the restored cluster.
#' If you are using a DS node type, you can also choose to change to
#' another DS node type of the same size during restore.
#'
#' If you restore a cluster into a VPC, you must provide a cluster subnet
#' group where you want the cluster restored.
#'
#' For more information about working with snapshots, go to [Amazon
#' Redshift
#' Snapshots](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_restore_from_cluster_snapshot(ClusterIdentifier,
#' SnapshotIdentifier, SnapshotClusterIdentifier, Port, AvailabilityZone,
#' AllowVersionUpgrade, ClusterSubnetGroupName, PubliclyAccessible,
#' OwnerAccount, HsmClientCertificateIdentifier,
#' HsmConfigurationIdentifier, ElasticIp, ClusterParameterGroupName,
#' ClusterSecurityGroups, VpcSecurityGroupIds, PreferredMaintenanceWindow,
#' AutomatedSnapshotRetentionPeriod, ManualSnapshotRetentionPeriod,
#' KmsKeyId, NodeType, EnhancedVpcRouting, AdditionalInfo, IamRoles,
#' MaintenanceTrackName, SnapshotScheduleIdentifier, NumberOfNodes,
#' AvailabilityZoneRelocation)
#'
#' @param ClusterIdentifier [required] The identifier of the cluster that will be created from restoring the
#' snapshot.
#'
#' Constraints:
#'
#' - Must contain from 1 to 63 alphanumeric characters or hyphens.
#'
#' - Alphabetic characters must be lowercase.
#'
#' - First character must be a letter.
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens.
#'
#' - Must be unique for all clusters within an AWS account.
#' @param SnapshotIdentifier [required] The name of the snapshot from which to create the new cluster. This
#' parameter isn't case sensitive.
#'
#' Example: `my-snapshot-id`
#' @param SnapshotClusterIdentifier The name of the cluster the source snapshot was created from. This
#' parameter is required if your IAM user has a policy containing a
#' snapshot resource element that specifies anything other than * for the
#' cluster name.
#' @param Port The port number on which the cluster accepts connections.
#'
#' Default: The same port as the original cluster.
#'
#' Constraints: Must be between `1115` and `65535`.
#' @param AvailabilityZone The Amazon EC2 Availability Zone in which to restore the cluster.
#'
#' Default: A random, system-chosen Availability Zone.
#'
#' Example: `us-east-2a`
#' @param AllowVersionUpgrade If `true`, major version upgrades can be applied during the maintenance
#' window to the Amazon Redshift engine that is running on the cluster.
#'
#' Default: `true`
#' @param ClusterSubnetGroupName The name of the subnet group where you want the cluster restored.
#'
#' A snapshot of cluster in VPC can be restored only in VPC. Therefore, you
#' must provide subnet group name where you want the cluster restored.
#' @param PubliclyAccessible If `true`, the cluster can be accessed from a public network.
#' @param OwnerAccount The AWS customer account used to create or copy the snapshot. Required
#' if you are restoring a snapshot you do not own, optional if you own the
#' snapshot.
#' @param HsmClientCertificateIdentifier Specifies the name of the HSM client certificate the Amazon Redshift
#' cluster uses to retrieve the data encryption keys stored in an HSM.
#' @param HsmConfigurationIdentifier Specifies the name of the HSM configuration that contains the
#' information the Amazon Redshift cluster can use to retrieve and store
#' keys in an HSM.
#' @param ElasticIp The elastic IP (EIP) address for the cluster.
#' @param ClusterParameterGroupName The name of the parameter group to be associated with this cluster.
#'
#' Default: The default Amazon Redshift cluster parameter group. For
#' information about the default parameter group, go to [Working with
#' Amazon Redshift Parameter
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html).
#'
#' Constraints:
#'
#' - Must be 1 to 255 alphanumeric characters or hyphens.
#'
#' - First character must be a letter.
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens.
#' @param ClusterSecurityGroups A list of security groups to be associated with this cluster.
#'
#' Default: The default cluster security group for Amazon Redshift.
#'
#' Cluster security groups only apply to clusters outside of VPCs.
#' @param VpcSecurityGroupIds A list of Virtual Private Cloud (VPC) security groups to be associated
#' with the cluster.
#'
#' Default: The default VPC security group is associated with the cluster.
#'
#' VPC security groups only apply to clusters in VPCs.
#' @param PreferredMaintenanceWindow The weekly time range (in UTC) during which automated cluster
#' maintenance can occur.
#'
#' Format: `ddd:hh24:mi-ddd:hh24:mi`
#'
#' Default: The value selected for the cluster from which the snapshot was
#' taken. For more information about the time blocks for each region, see
#' [Maintenance
#' Windows](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-maintenance-windows)
#' in Amazon Redshift Cluster Management Guide.
#'
#' Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
#'
#' Constraints: Minimum 30-minute window.
#' @param AutomatedSnapshotRetentionPeriod The number of days that automated snapshots are retained. If the value
#' is 0, automated snapshots are disabled. Even if automated snapshots are
#' disabled, you can still create manual snapshots when you want with
#' [`create_cluster_snapshot`][redshift_create_cluster_snapshot].
#'
#' Default: The value selected for the cluster from which the snapshot was
#' taken.
#'
#' Constraints: Must be a value from 0 to 35.
#' @param ManualSnapshotRetentionPeriod The default number of days to retain a manual snapshot. If the value is
#' -1, the snapshot is retained indefinitely. This setting doesn't change
#' the retention period of existing snapshots.
#'
#' The value must be either -1 or an integer between 1 and 3,653.
#' @param KmsKeyId The AWS Key Management Service (KMS) key ID of the encryption key that
#' you want to use to encrypt data in the cluster that you restore from a
#' shared snapshot.
#' @param NodeType The node type that the restored cluster will be provisioned with.
#'
#' Default: The node type of the cluster from which the snapshot was taken.
#' You can modify this if you are using any DS node type. In that case, you
#' can choose to restore into another DS node type of the same size. For
#' example, you can restore ds1.8xlarge into ds2.8xlarge, or ds1.xlarge
#' into ds2.xlarge. If you have a DC instance type, you must restore into
#' that same instance type and size. In other words, you can only restore a
#' dc1.large instance type into another dc1.large instance type or
#' dc2.large instance type. You can't restore dc1.8xlarge to dc2.8xlarge.
#' First restore to a dc1.8xlarge cluster, then resize to a dc2.8large
#' cluster. For more information about node types, see [About Clusters and
#' Nodes](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-about-clusters-and-nodes)
#' in the *Amazon Redshift Cluster Management Guide*.
#' @param EnhancedVpcRouting An option that specifies whether to create the cluster with enhanced VPC
#' routing enabled. To create a cluster that uses enhanced VPC routing, the
#' cluster must be in a VPC. For more information, see [Enhanced VPC
#' Routing](https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html)
#' in the Amazon Redshift Cluster Management Guide.
#'
#' If this option is `true`, enhanced VPC routing is enabled.
#'
#' Default: false
#' @param AdditionalInfo Reserved.
#' @param IamRoles A list of AWS Identity and Access Management (IAM) roles that can be
#' used by the cluster to access other AWS services. You must supply the
#' IAM roles in their Amazon Resource Name (ARN) format. You can supply up
#' to 10 IAM roles in a single request.
#'
#' A cluster can have up to 10 IAM roles associated at any time.
#' @param MaintenanceTrackName The name of the maintenance track for the restored cluster. When you
#' take a snapshot, the snapshot inherits the `MaintenanceTrack` value from
#' the cluster. The snapshot might be on a different track than the cluster
#' that was the source for the snapshot. For example, suppose that you take
#' a snapshot of a cluster that is on the current track and then change the
#' cluster to be on the trailing track. In this case, the snapshot and the
#' source cluster are on different tracks.
#' @param SnapshotScheduleIdentifier A unique identifier for the snapshot schedule.
#' @param NumberOfNodes The number of nodes specified when provisioning the restored cluster.
#' @param AvailabilityZoneRelocation The option to enable relocation for an Amazon Redshift cluster between
#' Availability Zones after the cluster is restored.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$restore_from_cluster_snapshot(
#' ClusterIdentifier = "string",
#' SnapshotIdentifier = "string",
#' SnapshotClusterIdentifier = "string",
#' Port = 123,
#' AvailabilityZone = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' ClusterSubnetGroupName = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' OwnerAccount = "string",
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' ElasticIp = "string",
#' ClusterParameterGroupName = "string",
#' ClusterSecurityGroups = list(
#' "string"
#' ),
#' VpcSecurityGroupIds = list(
#' "string"
#' ),
#' PreferredMaintenanceWindow = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' KmsKeyId = "string",
#' NodeType = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' AdditionalInfo = "string",
#' IamRoles = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' SnapshotScheduleIdentifier = "string",
#' NumberOfNodes = 123,
#' AvailabilityZoneRelocation = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_restore_from_cluster_snapshot
redshift_restore_from_cluster_snapshot <- function(ClusterIdentifier, SnapshotIdentifier, SnapshotClusterIdentifier = NULL, Port = NULL, AvailabilityZone = NULL, AllowVersionUpgrade = NULL, ClusterSubnetGroupName = NULL, PubliclyAccessible = NULL, OwnerAccount = NULL, HsmClientCertificateIdentifier = NULL, HsmConfigurationIdentifier = NULL, ElasticIp = NULL, ClusterParameterGroupName = NULL, ClusterSecurityGroups = NULL, VpcSecurityGroupIds = NULL, PreferredMaintenanceWindow = NULL, AutomatedSnapshotRetentionPeriod = NULL, ManualSnapshotRetentionPeriod = NULL, KmsKeyId = NULL, NodeType = NULL, EnhancedVpcRouting = NULL, AdditionalInfo = NULL, IamRoles = NULL, MaintenanceTrackName = NULL, SnapshotScheduleIdentifier = NULL, NumberOfNodes = NULL, AvailabilityZoneRelocation = NULL) {
  # Describe the RestoreFromClusterSnapshot call (non-paginated POST).
  operation <- new_operation(
    name = "RestoreFromClusterSnapshot",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize every caller-supplied argument into the request shape;
  # NULL arguments are simply omitted by the shape constructor.
  request_body <- .redshift$restore_from_cluster_snapshot_input(
    ClusterIdentifier = ClusterIdentifier,
    SnapshotIdentifier = SnapshotIdentifier,
    SnapshotClusterIdentifier = SnapshotClusterIdentifier,
    Port = Port,
    AvailabilityZone = AvailabilityZone,
    AllowVersionUpgrade = AllowVersionUpgrade,
    ClusterSubnetGroupName = ClusterSubnetGroupName,
    PubliclyAccessible = PubliclyAccessible,
    OwnerAccount = OwnerAccount,
    HsmClientCertificateIdentifier = HsmClientCertificateIdentifier,
    HsmConfigurationIdentifier = HsmConfigurationIdentifier,
    ElasticIp = ElasticIp,
    ClusterParameterGroupName = ClusterParameterGroupName,
    ClusterSecurityGroups = ClusterSecurityGroups,
    VpcSecurityGroupIds = VpcSecurityGroupIds,
    PreferredMaintenanceWindow = PreferredMaintenanceWindow,
    AutomatedSnapshotRetentionPeriod = AutomatedSnapshotRetentionPeriod,
    ManualSnapshotRetentionPeriod = ManualSnapshotRetentionPeriod,
    KmsKeyId = KmsKeyId,
    NodeType = NodeType,
    EnhancedVpcRouting = EnhancedVpcRouting,
    AdditionalInfo = AdditionalInfo,
    IamRoles = IamRoles,
    MaintenanceTrackName = MaintenanceTrackName,
    SnapshotScheduleIdentifier = SnapshotScheduleIdentifier,
    NumberOfNodes = NumberOfNodes,
    AvailabilityZoneRelocation = AvailabilityZoneRelocation
  )
  response_shape <- .redshift$restore_from_cluster_snapshot_output()
  # Build a service client from the active configuration and dispatch.
  svc <- .redshift$service(get_config())
  req <- new_request(svc, operation, request_body, response_shape)
  send_request(req)
}
.redshift$operations$restore_from_cluster_snapshot <- redshift_restore_from_cluster_snapshot
#' Creates a new table from a table in an Amazon Redshift cluster snapshot
#'
#' @description
#' Creates a new table from a table in an Amazon Redshift cluster snapshot.
#' You must create the new table within the Amazon Redshift cluster that
#' the snapshot was taken from.
#'
#' You cannot use
#' [`restore_table_from_cluster_snapshot`][redshift_restore_table_from_cluster_snapshot]
#' to restore a table with the same name as an existing table in an Amazon
#' Redshift cluster. That is, you cannot overwrite an existing table in a
#' cluster with a restored table. If you want to replace your original
#' table with a new, restored table, then rename or drop your original
#' table before you call
#' [`restore_table_from_cluster_snapshot`][redshift_restore_table_from_cluster_snapshot].
#' When you have renamed your original table, then you can pass the
#' original name of the table as the `NewTableName` parameter value in the
#' call to
#' [`restore_table_from_cluster_snapshot`][redshift_restore_table_from_cluster_snapshot].
#' This way, you can replace the original table with the table created from
#' the snapshot.
#'
#' @usage
#' redshift_restore_table_from_cluster_snapshot(ClusterIdentifier,
#' SnapshotIdentifier, SourceDatabaseName, SourceSchemaName,
#' SourceTableName, TargetDatabaseName, TargetSchemaName, NewTableName)
#'
#' @param ClusterIdentifier [required] The identifier of the Amazon Redshift cluster to restore the table to.
#' @param SnapshotIdentifier [required] The identifier of the snapshot to restore the table from. This snapshot
#' must have been created from the Amazon Redshift cluster specified by the
#' `ClusterIdentifier` parameter.
#' @param SourceDatabaseName [required] The name of the source database that contains the table to restore from.
#' @param SourceSchemaName The name of the source schema that contains the table to restore from.
#' If you do not specify a `SourceSchemaName` value, the default is
#' `public`.
#' @param SourceTableName [required] The name of the source table to restore from.
#' @param TargetDatabaseName The name of the database to restore the table to.
#' @param TargetSchemaName The name of the schema to restore the table to.
#' @param NewTableName [required] The name of the table to create as a result of the current request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' TableRestoreStatus = list(
#' TableRestoreRequestId = "string",
#' Status = "PENDING"|"IN_PROGRESS"|"SUCCEEDED"|"FAILED"|"CANCELED",
#' Message = "string",
#' RequestTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ProgressInMegaBytes = 123,
#' TotalDataInMegaBytes = 123,
#' ClusterIdentifier = "string",
#' SnapshotIdentifier = "string",
#' SourceDatabaseName = "string",
#' SourceSchemaName = "string",
#' SourceTableName = "string",
#' TargetDatabaseName = "string",
#' TargetSchemaName = "string",
#' NewTableName = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$restore_table_from_cluster_snapshot(
#' ClusterIdentifier = "string",
#' SnapshotIdentifier = "string",
#' SourceDatabaseName = "string",
#' SourceSchemaName = "string",
#' SourceTableName = "string",
#' TargetDatabaseName = "string",
#' TargetSchemaName = "string",
#' NewTableName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_restore_table_from_cluster_snapshot
redshift_restore_table_from_cluster_snapshot <- function(ClusterIdentifier, SnapshotIdentifier, SourceDatabaseName, SourceSchemaName = NULL, SourceTableName, TargetDatabaseName = NULL, TargetSchemaName = NULL, NewTableName) {
  # Describe the RestoreTableFromClusterSnapshot call (non-paginated POST).
  operation <- new_operation(
    name = "RestoreTableFromClusterSnapshot",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied arguments into the request shape.
  request_body <- .redshift$restore_table_from_cluster_snapshot_input(
    ClusterIdentifier = ClusterIdentifier,
    SnapshotIdentifier = SnapshotIdentifier,
    SourceDatabaseName = SourceDatabaseName,
    SourceSchemaName = SourceSchemaName,
    SourceTableName = SourceTableName,
    TargetDatabaseName = TargetDatabaseName,
    TargetSchemaName = TargetSchemaName,
    NewTableName = NewTableName
  )
  response_shape <- .redshift$restore_table_from_cluster_snapshot_output()
  # Build a service client from the active configuration and dispatch.
  svc <- .redshift$service(get_config())
  req <- new_request(svc, operation, request_body, response_shape)
  send_request(req)
}
.redshift$operations$restore_table_from_cluster_snapshot <- redshift_restore_table_from_cluster_snapshot
#' Resumes a paused cluster
#'
#' @description
#' Resumes a paused cluster.
#'
#' @usage
#' redshift_resume_cluster(ClusterIdentifier)
#'
#' @param ClusterIdentifier [required] The identifier of the cluster to be resumed.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$resume_cluster(
#' ClusterIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_resume_cluster
redshift_resume_cluster <- function(ClusterIdentifier) {
  # Describe the ResumeCluster call (non-paginated POST).
  operation <- new_operation(
    name = "ResumeCluster",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the single required argument into the request shape.
  request_body <- .redshift$resume_cluster_input(ClusterIdentifier = ClusterIdentifier)
  response_shape <- .redshift$resume_cluster_output()
  # Build a service client from the active configuration and dispatch.
  svc <- .redshift$service(get_config())
  req <- new_request(svc, operation, request_body, response_shape)
  send_request(req)
}
.redshift$operations$resume_cluster <- redshift_resume_cluster
#' Revokes an ingress rule in an Amazon Redshift security group for a
#' previously authorized IP range or Amazon EC2 security group
#'
#' @description
#' Revokes an ingress rule in an Amazon Redshift security group for a
#' previously authorized IP range or Amazon EC2 security group. To add an
#' ingress rule, see
#' [`authorize_cluster_security_group_ingress`][redshift_authorize_cluster_security_group_ingress].
#' For information about managing security groups, go to [Amazon Redshift
#' Cluster Security
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_revoke_cluster_security_group_ingress(ClusterSecurityGroupName,
#' CIDRIP, EC2SecurityGroupName, EC2SecurityGroupOwnerId)
#'
#' @param ClusterSecurityGroupName [required] The name of the security group from which to revoke the ingress rule.
#' @param CIDRIP The IP range for which to revoke access. This range must be a valid
#' Classless Inter-Domain Routing (CIDR) block of IP addresses. If `CIDRIP`
#' is specified, `EC2SecurityGroupName` and `EC2SecurityGroupOwnerId`
#' cannot be provided.
#' @param EC2SecurityGroupName The name of the EC2 Security Group whose access is to be revoked. If
#' `EC2SecurityGroupName` is specified, `EC2SecurityGroupOwnerId` must also
#' be provided and `CIDRIP` cannot be provided.
#' @param EC2SecurityGroupOwnerId The AWS account number of the owner of the security group specified in
#' the `EC2SecurityGroupName` parameter. The AWS access key ID is not an
#' acceptable value. If `EC2SecurityGroupOwnerId` is specified,
#' `EC2SecurityGroupName` must also be provided. and `CIDRIP` cannot be
#' provided.
#'
#' Example: `111122223333`
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ClusterSecurityGroup = list(
#' ClusterSecurityGroupName = "string",
#' Description = "string",
#' EC2SecurityGroups = list(
#' list(
#' Status = "string",
#' EC2SecurityGroupName = "string",
#' EC2SecurityGroupOwnerId = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ),
#' IPRanges = list(
#' list(
#' Status = "string",
#' CIDRIP = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ),
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$revoke_cluster_security_group_ingress(
#' ClusterSecurityGroupName = "string",
#' CIDRIP = "string",
#' EC2SecurityGroupName = "string",
#' EC2SecurityGroupOwnerId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_revoke_cluster_security_group_ingress
redshift_revoke_cluster_security_group_ingress <- function(ClusterSecurityGroupName, CIDRIP = NULL, EC2SecurityGroupName = NULL, EC2SecurityGroupOwnerId = NULL) {
  # Describe the RevokeClusterSecurityGroupIngress call (non-paginated POST).
  operation <- new_operation(
    name = "RevokeClusterSecurityGroupIngress",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied arguments into the request shape;
  # the service enforces CIDRIP vs. EC2 security-group exclusivity.
  request_body <- .redshift$revoke_cluster_security_group_ingress_input(
    ClusterSecurityGroupName = ClusterSecurityGroupName,
    CIDRIP = CIDRIP,
    EC2SecurityGroupName = EC2SecurityGroupName,
    EC2SecurityGroupOwnerId = EC2SecurityGroupOwnerId
  )
  response_shape <- .redshift$revoke_cluster_security_group_ingress_output()
  # Build a service client from the active configuration and dispatch.
  svc <- .redshift$service(get_config())
  req <- new_request(svc, operation, request_body, response_shape)
  send_request(req)
}
.redshift$operations$revoke_cluster_security_group_ingress <- redshift_revoke_cluster_security_group_ingress
#' Removes the ability of the specified AWS customer account to restore the
#' specified snapshot
#'
#' @description
#' Removes the ability of the specified AWS customer account to restore the
#' specified snapshot. If the account is currently restoring the snapshot,
#' the restore will run to completion.
#'
#' For more information about working with snapshots, go to [Amazon
#' Redshift
#' Snapshots](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_revoke_snapshot_access(SnapshotIdentifier,
#' SnapshotClusterIdentifier, AccountWithRestoreAccess)
#'
#' @param SnapshotIdentifier [required] The identifier of the snapshot that the account can no longer access.
#' @param SnapshotClusterIdentifier The identifier of the cluster the snapshot was created from. This
#' parameter is required if your IAM user has a policy containing a
#' snapshot resource element that specifies anything other than * for the
#' cluster name.
#' @param AccountWithRestoreAccess [required] The identifier of the AWS customer account that can no longer restore
#' the specified snapshot.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Snapshot = list(
#' SnapshotIdentifier = "string",
#' ClusterIdentifier = "string",
#' SnapshotCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Status = "string",
#' Port = 123,
#' AvailabilityZone = "string",
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' MasterUsername = "string",
#' ClusterVersion = "string",
#' EngineFullVersion = "string",
#' SnapshotType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' DBName = "string",
#' VpcId = "string",
#' Encrypted = TRUE|FALSE,
#' KmsKeyId = "string",
#' EncryptedWithHSM = TRUE|FALSE,
#' AccountsWithRestoreAccess = list(
#' list(
#' AccountId = "string",
#' AccountAlias = "string"
#' )
#' ),
#' OwnerAccount = "string",
#' TotalBackupSizeInMegaBytes = 123.0,
#' ActualIncrementalBackupSizeInMegaBytes = 123.0,
#' BackupProgressInMegaBytes = 123.0,
#' CurrentBackupRateInMegaBytesPerSecond = 123.0,
#' EstimatedSecondsToCompletion = 123,
#' ElapsedTimeInSeconds = 123,
#' SourceRegion = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' RestorableNodeTypes = list(
#' "string"
#' ),
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' ManualSnapshotRetentionPeriod = 123,
#' ManualSnapshotRemainingDays = 123,
#' SnapshotRetentionStartTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$revoke_snapshot_access(
#' SnapshotIdentifier = "string",
#' SnapshotClusterIdentifier = "string",
#' AccountWithRestoreAccess = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_revoke_snapshot_access
redshift_revoke_snapshot_access <- function(SnapshotIdentifier, SnapshotClusterIdentifier = NULL, AccountWithRestoreAccess) {
  # Operation descriptor: RevokeSnapshotAccess is a plain POST to the service root.
  operation <- new_operation(
    name = "RevokeSnapshotAccess",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied parameters and the expected response shape.
  request_params <- .redshift$revoke_snapshot_access_input(
    SnapshotIdentifier = SnapshotIdentifier,
    SnapshotClusterIdentifier = SnapshotClusterIdentifier,
    AccountWithRestoreAccess = AccountWithRestoreAccess
  )
  response_shape <- .redshift$revoke_snapshot_access_output()
  # Build a service client from the current configuration and dispatch the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_params, response_shape))
}
.redshift$operations$revoke_snapshot_access <- redshift_revoke_snapshot_access
#' Rotates the encryption keys for a cluster
#'
#' @description
#' Rotates the encryption keys for a cluster.
#'
#' @usage
#' redshift_rotate_encryption_key(ClusterIdentifier)
#'
#' @param ClusterIdentifier [required] The unique identifier of the cluster that you want to rotate the
#' encryption keys for.
#'
#' Constraints: Must be the name of valid cluster that has encryption
#' enabled.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$rotate_encryption_key(
#' ClusterIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_rotate_encryption_key
redshift_rotate_encryption_key <- function(ClusterIdentifier) {
  # Operation descriptor: RotateEncryptionKey is a plain POST to the service root.
  operation <- new_operation(
    name = "RotateEncryptionKey",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied parameter and the expected response shape.
  request_params <- .redshift$rotate_encryption_key_input(
    ClusterIdentifier = ClusterIdentifier
  )
  response_shape <- .redshift$rotate_encryption_key_output()
  # Build a service client from the current configuration and dispatch the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_params, response_shape))
}
.redshift$operations$rotate_encryption_key <- redshift_rotate_encryption_key
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common get_config new_operation new_request send_request
#' @include redshift_service.R
NULL
#' Exchanges a DC1 Reserved Node for a DC2 Reserved Node with no changes to
#' the configuration (term, payment type, or number of nodes) and no
#' additional costs
#'
#' @description
#' Exchanges a DC1 Reserved Node for a DC2 Reserved Node with no changes to
#' the configuration (term, payment type, or number of nodes) and no
#' additional costs.
#'
#' @usage
#' redshift_accept_reserved_node_exchange(ReservedNodeId,
#' TargetReservedNodeOfferingId)
#'
#' @param ReservedNodeId [required] A string representing the node identifier of the DC1 Reserved Node to be
#' exchanged.
#' @param TargetReservedNodeOfferingId [required] The unique identifier of the DC2 Reserved Node offering to be used for
#' the exchange. You can obtain the value for the parameter by calling
#' [`get_reserved_node_exchange_offerings`][redshift_get_reserved_node_exchange_offerings]
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ExchangedReservedNode = list(
#' ReservedNodeId = "string",
#' ReservedNodeOfferingId = "string",
#' NodeType = "string",
#' StartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Duration = 123,
#' FixedPrice = 123.0,
#' UsagePrice = 123.0,
#' CurrencyCode = "string",
#' NodeCount = 123,
#' State = "string",
#' OfferingType = "string",
#' RecurringCharges = list(
#' list(
#' RecurringChargeAmount = 123.0,
#' RecurringChargeFrequency = "string"
#' )
#' ),
#' ReservedNodeOfferingType = "Regular"|"Upgradable"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$accept_reserved_node_exchange(
#' ReservedNodeId = "string",
#' TargetReservedNodeOfferingId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_accept_reserved_node_exchange
redshift_accept_reserved_node_exchange <- function(ReservedNodeId, TargetReservedNodeOfferingId) {
  # Operation descriptor: AcceptReservedNodeExchange is a plain POST to the service root.
  operation <- new_operation(
    name = "AcceptReservedNodeExchange",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied parameters and the expected response shape.
  request_params <- .redshift$accept_reserved_node_exchange_input(
    ReservedNodeId = ReservedNodeId,
    TargetReservedNodeOfferingId = TargetReservedNodeOfferingId
  )
  response_shape <- .redshift$accept_reserved_node_exchange_output()
  # Build a service client from the current configuration and dispatch the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_params, response_shape))
}
.redshift$operations$accept_reserved_node_exchange <- redshift_accept_reserved_node_exchange
#' Adds an inbound (ingress) rule to an Amazon Redshift security group
#'
#' @description
#' Adds an inbound (ingress) rule to an Amazon Redshift security group.
#' Depending on whether the application accessing your cluster is running
#' on the Internet or an Amazon EC2 instance, you can authorize inbound
#' access to either a Classless Interdomain Routing (CIDR)/Internet
#' Protocol (IP) range or to an Amazon EC2 security group. You can add as
#' many as 20 ingress rules to an Amazon Redshift security group.
#'
#' If you authorize access to an Amazon EC2 security group, specify
#' *EC2SecurityGroupName* and *EC2SecurityGroupOwnerId*. The Amazon EC2
#' security group and Amazon Redshift cluster must be in the same AWS
#' Region.
#'
#' If you authorize access to a CIDR/IP address range, specify *CIDRIP*.
#' For an overview of CIDR blocks, see the Wikipedia article on [Classless
#' Inter-Domain
#' Routing](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing).
#'
#' You must also associate the security group with a cluster so that
#' clients running on these IP addresses or the EC2 instance are authorized
#' to connect to the cluster. For information about managing security
#' groups, go to [Working with Security
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_authorize_cluster_security_group_ingress(
#' ClusterSecurityGroupName, CIDRIP, EC2SecurityGroupName,
#' EC2SecurityGroupOwnerId)
#'
#' @param ClusterSecurityGroupName [required] The name of the security group to which the ingress rule is added.
#' @param CIDRIP The IP range to be added the Amazon Redshift security group.
#' @param EC2SecurityGroupName The EC2 security group to be added the Amazon Redshift security group.
#' @param EC2SecurityGroupOwnerId The AWS account number of the owner of the security group specified by
#' the *EC2SecurityGroupName* parameter. The AWS Access Key ID is not an
#' acceptable value.
#'
#' Example: `111122223333`
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ClusterSecurityGroup = list(
#' ClusterSecurityGroupName = "string",
#' Description = "string",
#' EC2SecurityGroups = list(
#' list(
#' Status = "string",
#' EC2SecurityGroupName = "string",
#' EC2SecurityGroupOwnerId = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ),
#' IPRanges = list(
#' list(
#' Status = "string",
#' CIDRIP = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ),
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$authorize_cluster_security_group_ingress(
#' ClusterSecurityGroupName = "string",
#' CIDRIP = "string",
#' EC2SecurityGroupName = "string",
#' EC2SecurityGroupOwnerId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_authorize_cluster_security_group_ingress
redshift_authorize_cluster_security_group_ingress <- function(ClusterSecurityGroupName, CIDRIP = NULL, EC2SecurityGroupName = NULL, EC2SecurityGroupOwnerId = NULL) {
  # Operation descriptor: AuthorizeClusterSecurityGroupIngress is a plain POST
  # to the service root.
  operation <- new_operation(
    name = "AuthorizeClusterSecurityGroupIngress",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied parameters and the expected response shape.
  request_params <- .redshift$authorize_cluster_security_group_ingress_input(
    ClusterSecurityGroupName = ClusterSecurityGroupName,
    CIDRIP = CIDRIP,
    EC2SecurityGroupName = EC2SecurityGroupName,
    EC2SecurityGroupOwnerId = EC2SecurityGroupOwnerId
  )
  response_shape <- .redshift$authorize_cluster_security_group_ingress_output()
  # Build a service client from the current configuration and dispatch the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_params, response_shape))
}
.redshift$operations$authorize_cluster_security_group_ingress <- redshift_authorize_cluster_security_group_ingress
#' Authorizes the specified AWS customer account to restore the specified
#' snapshot
#'
#' @description
#' Authorizes the specified AWS customer account to restore the specified
#' snapshot.
#'
#' For more information about working with snapshots, go to [Amazon
#' Redshift
#' Snapshots](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_authorize_snapshot_access(SnapshotIdentifier,
#' SnapshotClusterIdentifier, AccountWithRestoreAccess)
#'
#' @param SnapshotIdentifier [required] The identifier of the snapshot the account is authorized to restore.
#' @param SnapshotClusterIdentifier The identifier of the cluster the snapshot was created from. This
#' parameter is required if your IAM user has a policy containing a
#' snapshot resource element that specifies anything other than * for the
#' cluster name.
#' @param AccountWithRestoreAccess [required] The identifier of the AWS customer account authorized to restore the
#' specified snapshot.
#'
#' To share a snapshot with AWS support, specify amazon-redshift-support.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Snapshot = list(
#' SnapshotIdentifier = "string",
#' ClusterIdentifier = "string",
#' SnapshotCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Status = "string",
#' Port = 123,
#' AvailabilityZone = "string",
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' MasterUsername = "string",
#' ClusterVersion = "string",
#' EngineFullVersion = "string",
#' SnapshotType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' DBName = "string",
#' VpcId = "string",
#' Encrypted = TRUE|FALSE,
#' KmsKeyId = "string",
#' EncryptedWithHSM = TRUE|FALSE,
#' AccountsWithRestoreAccess = list(
#' list(
#' AccountId = "string",
#' AccountAlias = "string"
#' )
#' ),
#' OwnerAccount = "string",
#' TotalBackupSizeInMegaBytes = 123.0,
#' ActualIncrementalBackupSizeInMegaBytes = 123.0,
#' BackupProgressInMegaBytes = 123.0,
#' CurrentBackupRateInMegaBytesPerSecond = 123.0,
#' EstimatedSecondsToCompletion = 123,
#' ElapsedTimeInSeconds = 123,
#' SourceRegion = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' RestorableNodeTypes = list(
#' "string"
#' ),
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' ManualSnapshotRetentionPeriod = 123,
#' ManualSnapshotRemainingDays = 123,
#' SnapshotRetentionStartTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$authorize_snapshot_access(
#' SnapshotIdentifier = "string",
#' SnapshotClusterIdentifier = "string",
#' AccountWithRestoreAccess = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_authorize_snapshot_access
redshift_authorize_snapshot_access <- function(SnapshotIdentifier, SnapshotClusterIdentifier = NULL, AccountWithRestoreAccess) {
  # Operation descriptor: AuthorizeSnapshotAccess is a plain POST to the service root.
  operation <- new_operation(
    name = "AuthorizeSnapshotAccess",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied parameters and the expected response shape.
  request_params <- .redshift$authorize_snapshot_access_input(
    SnapshotIdentifier = SnapshotIdentifier,
    SnapshotClusterIdentifier = SnapshotClusterIdentifier,
    AccountWithRestoreAccess = AccountWithRestoreAccess
  )
  response_shape <- .redshift$authorize_snapshot_access_output()
  # Build a service client from the current configuration and dispatch the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_params, response_shape))
}
.redshift$operations$authorize_snapshot_access <- redshift_authorize_snapshot_access
#' Deletes a set of cluster snapshots
#'
#' @description
#' Deletes a set of cluster snapshots.
#'
#' @usage
#' redshift_batch_delete_cluster_snapshots(Identifiers)
#'
#' @param Identifiers [required] A list of identifiers for the snapshots that you want to delete.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Resources = list(
#' "string"
#' ),
#' Errors = list(
#' list(
#' SnapshotIdentifier = "string",
#' SnapshotClusterIdentifier = "string",
#' FailureCode = "string",
#' FailureReason = "string"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$batch_delete_cluster_snapshots(
#' Identifiers = list(
#' list(
#' SnapshotIdentifier = "string",
#' SnapshotClusterIdentifier = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_batch_delete_cluster_snapshots
redshift_batch_delete_cluster_snapshots <- function(Identifiers) {
  # Operation descriptor: BatchDeleteClusterSnapshots is a plain POST to the service root.
  operation <- new_operation(
    name = "BatchDeleteClusterSnapshots",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied parameter and the expected response shape.
  request_params <- .redshift$batch_delete_cluster_snapshots_input(
    Identifiers = Identifiers
  )
  response_shape <- .redshift$batch_delete_cluster_snapshots_output()
  # Build a service client from the current configuration and dispatch the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_params, response_shape))
}
.redshift$operations$batch_delete_cluster_snapshots <- redshift_batch_delete_cluster_snapshots
#' Modifies the settings for a set of cluster snapshots
#'
#' @description
#' Modifies the settings for a set of cluster snapshots.
#'
#' @usage
#' redshift_batch_modify_cluster_snapshots(SnapshotIdentifierList,
#' ManualSnapshotRetentionPeriod, Force)
#'
#' @param SnapshotIdentifierList [required] A list of snapshot identifiers you want to modify.
#' @param ManualSnapshotRetentionPeriod The number of days that a manual snapshot is retained. If you specify
#' the value -1, the manual snapshot is retained indefinitely.
#'
#' The number must be either -1 or an integer between 1 and 3,653.
#'
#' If you decrease the manual snapshot retention period from its current
#' value, existing manual snapshots that fall outside of the new retention
#' period will return an error. If you want to suppress the errors and
#' delete the snapshots, use the force option.
#' @param Force A boolean value indicating whether to override an exception if the
#' retention period has passed.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Resources = list(
#' "string"
#' ),
#' Errors = list(
#' list(
#' SnapshotIdentifier = "string",
#' SnapshotClusterIdentifier = "string",
#' FailureCode = "string",
#' FailureReason = "string"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$batch_modify_cluster_snapshots(
#' SnapshotIdentifierList = list(
#' "string"
#' ),
#' ManualSnapshotRetentionPeriod = 123,
#' Force = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_batch_modify_cluster_snapshots
redshift_batch_modify_cluster_snapshots <- function(SnapshotIdentifierList, ManualSnapshotRetentionPeriod = NULL, Force = NULL) {
  # Operation descriptor: BatchModifyClusterSnapshots is a plain POST to the service root.
  operation <- new_operation(
    name = "BatchModifyClusterSnapshots",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied parameters and the expected response shape.
  request_params <- .redshift$batch_modify_cluster_snapshots_input(
    SnapshotIdentifierList = SnapshotIdentifierList,
    ManualSnapshotRetentionPeriod = ManualSnapshotRetentionPeriod,
    Force = Force
  )
  response_shape <- .redshift$batch_modify_cluster_snapshots_output()
  # Build a service client from the current configuration and dispatch the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_params, response_shape))
}
.redshift$operations$batch_modify_cluster_snapshots <- redshift_batch_modify_cluster_snapshots
#' Cancels a resize operation for a cluster
#'
#' @description
#' Cancels a resize operation for a cluster.
#'
#' @usage
#' redshift_cancel_resize(ClusterIdentifier)
#'
#' @param ClusterIdentifier [required] The unique identifier for the cluster that you want to cancel a resize
#' operation for.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' TargetNodeType = "string",
#' TargetNumberOfNodes = 123,
#' TargetClusterType = "string",
#' Status = "string",
#' ImportTablesCompleted = list(
#' "string"
#' ),
#' ImportTablesInProgress = list(
#' "string"
#' ),
#' ImportTablesNotStarted = list(
#' "string"
#' ),
#' AvgResizeRateInMegaBytesPerSecond = 123.0,
#' TotalResizeDataInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ResizeType = "string",
#' Message = "string",
#' TargetEncryptionType = "string",
#' DataTransferProgressPercent = 123.0
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$cancel_resize(
#' ClusterIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_cancel_resize
redshift_cancel_resize <- function(ClusterIdentifier) {
  # Operation descriptor: CancelResize is a plain POST to the service root.
  operation <- new_operation(
    name = "CancelResize",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied parameter and the expected response shape.
  request_params <- .redshift$cancel_resize_input(
    ClusterIdentifier = ClusterIdentifier
  )
  response_shape <- .redshift$cancel_resize_output()
  # Build a service client from the current configuration and dispatch the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_params, response_shape))
}
.redshift$operations$cancel_resize <- redshift_cancel_resize
#' Copies the specified automated cluster snapshot to a new manual cluster
#' snapshot
#'
#' @description
#' Copies the specified automated cluster snapshot to a new manual cluster
#' snapshot. The source must be an automated snapshot and it must be in the
#' available state.
#'
#' When you delete a cluster, Amazon Redshift deletes any automated
#' snapshots of the cluster. Also, when the retention period of the
#' snapshot expires, Amazon Redshift automatically deletes it. If you want
#' to keep an automated snapshot for a longer period, you can make a manual
#' copy of the snapshot. Manual snapshots are retained until you delete
#' them.
#'
#' For more information about working with snapshots, go to [Amazon
#' Redshift
#' Snapshots](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_copy_cluster_snapshot(SourceSnapshotIdentifier,
#' SourceSnapshotClusterIdentifier, TargetSnapshotIdentifier,
#' ManualSnapshotRetentionPeriod)
#'
#' @param SourceSnapshotIdentifier [required] The identifier for the source snapshot.
#'
#' Constraints:
#'
#' - Must be the identifier for a valid automated snapshot whose state is
#' `available`.
#' @param SourceSnapshotClusterIdentifier The identifier of the cluster the source snapshot was created from. This
#' parameter is required if your IAM user has a policy containing a
#' snapshot resource element that specifies anything other than * for the
#' cluster name.
#'
#' Constraints:
#'
#' - Must be the identifier for a valid cluster.
#' @param TargetSnapshotIdentifier [required] The identifier given to the new manual snapshot.
#'
#' Constraints:
#'
#' - Cannot be null, empty, or blank.
#'
#' - Must contain from 1 to 255 alphanumeric characters or hyphens.
#'
#' - First character must be a letter.
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens.
#'
#' - Must be unique for the AWS account that is making the request.
#' @param ManualSnapshotRetentionPeriod The number of days that a manual snapshot is retained. If the value is
#' -1, the manual snapshot is retained indefinitely.
#'
#' The value must be either -1 or an integer between 1 and 3,653.
#'
#' The default value is -1.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Snapshot = list(
#' SnapshotIdentifier = "string",
#' ClusterIdentifier = "string",
#' SnapshotCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Status = "string",
#' Port = 123,
#' AvailabilityZone = "string",
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' MasterUsername = "string",
#' ClusterVersion = "string",
#' EngineFullVersion = "string",
#' SnapshotType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' DBName = "string",
#' VpcId = "string",
#' Encrypted = TRUE|FALSE,
#' KmsKeyId = "string",
#' EncryptedWithHSM = TRUE|FALSE,
#' AccountsWithRestoreAccess = list(
#' list(
#' AccountId = "string",
#' AccountAlias = "string"
#' )
#' ),
#' OwnerAccount = "string",
#' TotalBackupSizeInMegaBytes = 123.0,
#' ActualIncrementalBackupSizeInMegaBytes = 123.0,
#' BackupProgressInMegaBytes = 123.0,
#' CurrentBackupRateInMegaBytesPerSecond = 123.0,
#' EstimatedSecondsToCompletion = 123,
#' ElapsedTimeInSeconds = 123,
#' SourceRegion = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' RestorableNodeTypes = list(
#' "string"
#' ),
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' ManualSnapshotRetentionPeriod = 123,
#' ManualSnapshotRemainingDays = 123,
#' SnapshotRetentionStartTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$copy_cluster_snapshot(
#' SourceSnapshotIdentifier = "string",
#' SourceSnapshotClusterIdentifier = "string",
#' TargetSnapshotIdentifier = "string",
#' ManualSnapshotRetentionPeriod = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_copy_cluster_snapshot
redshift_copy_cluster_snapshot <- function(SourceSnapshotIdentifier, SourceSnapshotClusterIdentifier = NULL, TargetSnapshotIdentifier, ManualSnapshotRetentionPeriod = NULL) {
  # Operation descriptor: CopyClusterSnapshot is a plain POST to the service root.
  operation <- new_operation(
    name = "CopyClusterSnapshot",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied parameters and the expected response shape.
  request_params <- .redshift$copy_cluster_snapshot_input(
    SourceSnapshotIdentifier = SourceSnapshotIdentifier,
    SourceSnapshotClusterIdentifier = SourceSnapshotClusterIdentifier,
    TargetSnapshotIdentifier = TargetSnapshotIdentifier,
    ManualSnapshotRetentionPeriod = ManualSnapshotRetentionPeriod
  )
  response_shape <- .redshift$copy_cluster_snapshot_output()
  # Build a service client from the current configuration and dispatch the call.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_params, response_shape))
}
.redshift$operations$copy_cluster_snapshot <- redshift_copy_cluster_snapshot
#' Creates a new cluster with the specified parameters
#'
#' @description
#' Creates a new cluster with the specified parameters.
#'
#' To create a cluster in Virtual Private Cloud (VPC), you must provide a
#' cluster subnet group name. The cluster subnet group identifies the
#' subnets of your VPC that Amazon Redshift uses when creating the cluster.
#' For more information about managing clusters, go to [Amazon Redshift
#' Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_create_cluster(DBName, ClusterIdentifier, ClusterType,
#' NodeType, MasterUsername, MasterUserPassword, ClusterSecurityGroups,
#' VpcSecurityGroupIds, ClusterSubnetGroupName, AvailabilityZone,
#' PreferredMaintenanceWindow, ClusterParameterGroupName,
#' AutomatedSnapshotRetentionPeriod, ManualSnapshotRetentionPeriod, Port,
#' ClusterVersion, AllowVersionUpgrade, NumberOfNodes, PubliclyAccessible,
#' Encrypted, HsmClientCertificateIdentifier, HsmConfigurationIdentifier,
#' ElasticIp, Tags, KmsKeyId, EnhancedVpcRouting, AdditionalInfo, IamRoles,
#' MaintenanceTrackName, SnapshotScheduleIdentifier,
#' AvailabilityZoneRelocation)
#'
#' @param DBName The name of the first database to be created when the cluster is
#' created.
#'
#' To create additional databases after the cluster is created, connect to
#' the cluster with a SQL client and use SQL commands to create a database.
#' For more information, go to [Create a
#' Database](https://docs.aws.amazon.com/redshift/latest/gsg/t_creating_database.html)
#' in the Amazon Redshift Database Developer Guide.
#'
#' Default: `dev`
#'
#' Constraints:
#'
#' - Must contain 1 to 64 alphanumeric characters.
#'
#' - Must contain only lowercase letters.
#'
#' - Cannot be a word that is reserved by the service. A list of reserved
#' words can be found in [Reserved
#' Words](https://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html)
#' in the Amazon Redshift Database Developer Guide.
#' @param ClusterIdentifier [required] A unique identifier for the cluster. You use this identifier to refer to
#' the cluster for any subsequent cluster operations such as deleting or
#' modifying. The identifier also appears in the Amazon Redshift console.
#'
#' Constraints:
#'
#' - Must contain from 1 to 63 alphanumeric characters or hyphens.
#'
#' - Alphabetic characters must be lowercase.
#'
#' - First character must be a letter.
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens.
#'
#' - Must be unique for all clusters within an AWS account.
#'
#' Example: `myexamplecluster`
#' @param ClusterType The type of the cluster. When cluster type is specified as
#'
#' - `single-node`, the **NumberOfNodes** parameter is not required.
#'
#' - `multi-node`, the **NumberOfNodes** parameter is required.
#'
#' Valid Values: `multi-node` | `single-node`
#'
#' Default: `multi-node`
#' @param NodeType [required] The node type to be provisioned for the cluster. For information about
#' node types, go to [Working with
#' Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' Valid Values: `ds2.xlarge` | `ds2.8xlarge` | `dc1.large` | `dc1.8xlarge`
#' | `dc2.large` | `dc2.8xlarge` | `ra3.xlplus` | `ra3.4xlarge` |
#' `ra3.16xlarge`
#' @param MasterUsername [required] The user name associated with the master user account for the cluster
#' that is being created.
#'
#' Constraints:
#'
#' - Must be 1 - 128 alphanumeric characters. The user name can't be
#' `PUBLIC`.
#'
#' - First character must be a letter.
#'
#' - Cannot be a reserved word. A list of reserved words can be found in
#' [Reserved
#' Words](https://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html)
#' in the Amazon Redshift Database Developer Guide.
#' @param MasterUserPassword [required] The password associated with the master user account for the cluster
#' that is being created.
#'
#' Constraints:
#'
#' - Must be between 8 and 64 characters in length.
#'
#' - Must contain at least one uppercase letter.
#'
#' - Must contain at least one lowercase letter.
#'
#' - Must contain one number.
#'
#' - Can be any printable ASCII character (ASCII code 33 to 126) except '
#' (single quote), " (double quote), \\, /, @@, or space.
#' @param ClusterSecurityGroups A list of security groups to be associated with this cluster.
#'
#' Default: The default cluster security group for Amazon Redshift.
#' @param VpcSecurityGroupIds A list of Virtual Private Cloud (VPC) security groups to be associated
#' with the cluster.
#'
#' Default: The default VPC security group is associated with the cluster.
#' @param ClusterSubnetGroupName The name of a cluster subnet group to be associated with this cluster.
#'
#' If this parameter is not provided the resulting cluster will be deployed
#' outside virtual private cloud (VPC).
#' @param AvailabilityZone The EC2 Availability Zone (AZ) in which you want Amazon Redshift to
#' provision the cluster. For example, if you have several EC2 instances
#' running in a specific Availability Zone, then you might want the cluster
#' to be provisioned in the same zone in order to decrease network latency.
#'
#' Default: A random, system-chosen Availability Zone in the region that is
#' specified by the endpoint.
#'
#' Example: `us-east-2d`
#'
#' Constraint: The specified Availability Zone must be in the same region
#' as the current endpoint.
#' @param PreferredMaintenanceWindow The weekly time range (in UTC) during which automated cluster
#' maintenance can occur.
#'
#' Format: `ddd:hh24:mi-ddd:hh24:mi`
#'
#' Default: A 30-minute window selected at random from an 8-hour block of
#' time per region, occurring on a random day of the week. For more
#' information about the time blocks for each region, see [Maintenance
#' Windows](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-maintenance-windows)
#' in Amazon Redshift Cluster Management Guide.
#'
#' Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
#'
#' Constraints: Minimum 30-minute window.
#' @param ClusterParameterGroupName The name of the parameter group to be associated with this cluster.
#'
#' Default: The default Amazon Redshift cluster parameter group. For
#' information about the default parameter group, go to [Working with
#' Amazon Redshift Parameter
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
#'
#' Constraints:
#'
#' - Must be 1 to 255 alphanumeric characters or hyphens.
#'
#' - First character must be a letter.
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens.
#' @param AutomatedSnapshotRetentionPeriod The number of days that automated snapshots are retained. If the value
#' is 0, automated snapshots are disabled. Even if automated snapshots are
#' disabled, you can still create manual snapshots when you want with
#' [`create_cluster_snapshot`][redshift_create_cluster_snapshot].
#'
#' Default: `1`
#'
#' Constraints: Must be a value from 0 to 35.
#' @param ManualSnapshotRetentionPeriod The default number of days to retain a manual snapshot. If the value is
#' -1, the snapshot is retained indefinitely. This setting doesn't change
#' the retention period of existing snapshots.
#'
#' The value must be either -1 or an integer between 1 and 3,653.
#' @param Port The port number on which the cluster accepts incoming connections.
#'
#' The cluster is accessible only via the JDBC and ODBC connection strings.
#' Part of the connection string requires the port on which the cluster
#' will listen for incoming connections.
#'
#' Default: `5439`
#'
#' Valid Values: `1150-65535`
#' @param ClusterVersion The version of the Amazon Redshift engine software that you want to
#' deploy on the cluster.
#'
#' The version selected runs on all the nodes in the cluster.
#'
#' Constraints: Only version 1.0 is currently available.
#'
#' Example: `1.0`
#' @param AllowVersionUpgrade If `true`, major version upgrades can be applied during the maintenance
#' window to the Amazon Redshift engine that is running on the cluster.
#'
#' When a new major version of the Amazon Redshift engine is released, you
#' can request that the service automatically apply upgrades during the
#' maintenance window to the Amazon Redshift engine that is running on your
#' cluster.
#'
#' Default: `true`
#' @param NumberOfNodes The number of compute nodes in the cluster. This parameter is required
#' when the **ClusterType** parameter is specified as `multi-node`.
#'
#' For information about determining how many nodes you need, go to
#' [Working with
#' Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' If you don't specify this parameter, you get a single-node cluster. When
#' requesting a multi-node cluster, you must specify the number of nodes
#' that you want in the cluster.
#'
#' Default: `1`
#'
#' Constraints: Value must be at least 1 and no more than 100.
#' @param PubliclyAccessible If `true`, the cluster can be accessed from a public network.
#' @param Encrypted If `true`, the data in the cluster is encrypted at rest.
#'
#' Default: false
#' @param HsmClientCertificateIdentifier Specifies the name of the HSM client certificate the Amazon Redshift
#' cluster uses to retrieve the data encryption keys stored in an HSM.
#' @param HsmConfigurationIdentifier Specifies the name of the HSM configuration that contains the
#' information the Amazon Redshift cluster can use to retrieve and store
#' keys in an HSM.
#' @param ElasticIp The Elastic IP (EIP) address for the cluster.
#'
#' Constraints: The cluster must be provisioned in EC2-VPC and
#' publicly-accessible through an Internet gateway. For more information
#' about provisioning clusters in EC2-VPC, go to [Supported Platforms to
#' Launch Your
#' Cluster](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#cluster-platforms)
#' in the Amazon Redshift Cluster Management Guide.
#' @param Tags A list of tag instances.
#' @param KmsKeyId The AWS Key Management Service (KMS) key ID of the encryption key that
#' you want to use to encrypt data in the cluster.
#' @param EnhancedVpcRouting An option that specifies whether to create the cluster with enhanced VPC
#' routing enabled. To create a cluster that uses enhanced VPC routing, the
#' cluster must be in a VPC. For more information, see [Enhanced VPC
#' Routing](https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html)
#' in the Amazon Redshift Cluster Management Guide.
#'
#' If this option is `true`, enhanced VPC routing is enabled.
#'
#' Default: false
#' @param AdditionalInfo Reserved.
#' @param IamRoles A list of AWS Identity and Access Management (IAM) roles that can be
#' used by the cluster to access other AWS services. You must supply the
#' IAM roles in their Amazon Resource Name (ARN) format. You can supply up
#' to 10 IAM roles in a single request.
#'
#' A cluster can have up to 10 IAM roles associated with it at any time.
#' @param MaintenanceTrackName An optional parameter for the name of the maintenance track for the
#' cluster. If you don't provide a maintenance track name, the cluster is
#' assigned to the `current` track.
#' @param SnapshotScheduleIdentifier A unique identifier for the snapshot schedule.
#' @param AvailabilityZoneRelocation The option to enable relocation for an Amazon Redshift cluster between
#' Availability Zones after the cluster is created.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_cluster(
#' DBName = "string",
#' ClusterIdentifier = "string",
#' ClusterType = "string",
#' NodeType = "string",
#' MasterUsername = "string",
#' MasterUserPassword = "string",
#' ClusterSecurityGroups = list(
#' "string"
#' ),
#' VpcSecurityGroupIds = list(
#' "string"
#' ),
#' ClusterSubnetGroupName = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' ClusterParameterGroupName = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' Port = 123,
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' ElasticIp = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' AdditionalInfo = "string",
#' IamRoles = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' SnapshotScheduleIdentifier = "string",
#' AvailabilityZoneRelocation = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_cluster
redshift_create_cluster <- function(DBName = NULL, ClusterIdentifier, ClusterType = NULL, NodeType, MasterUsername, MasterUserPassword, ClusterSecurityGroups = NULL, VpcSecurityGroupIds = NULL, ClusterSubnetGroupName = NULL, AvailabilityZone = NULL, PreferredMaintenanceWindow = NULL, ClusterParameterGroupName = NULL, AutomatedSnapshotRetentionPeriod = NULL, ManualSnapshotRetentionPeriod = NULL, Port = NULL, ClusterVersion = NULL, AllowVersionUpgrade = NULL, NumberOfNodes = NULL, PubliclyAccessible = NULL, Encrypted = NULL, HsmClientCertificateIdentifier = NULL, HsmConfigurationIdentifier = NULL, ElasticIp = NULL, Tags = NULL, KmsKeyId = NULL, EnhancedVpcRouting = NULL, AdditionalInfo = NULL, IamRoles = NULL, MaintenanceTrackName = NULL, SnapshotScheduleIdentifier = NULL, AvailabilityZoneRelocation = NULL) {
  # Describe the CreateCluster API call (query protocol: POST to "/").
  operation <- new_operation(
    name = "CreateCluster",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied arguments into the request shape.
  request_params <- .redshift$create_cluster_input(DBName = DBName, ClusterIdentifier = ClusterIdentifier, ClusterType = ClusterType, NodeType = NodeType, MasterUsername = MasterUsername, MasterUserPassword = MasterUserPassword, ClusterSecurityGroups = ClusterSecurityGroups, VpcSecurityGroupIds = VpcSecurityGroupIds, ClusterSubnetGroupName = ClusterSubnetGroupName, AvailabilityZone = AvailabilityZone, PreferredMaintenanceWindow = PreferredMaintenanceWindow, ClusterParameterGroupName = ClusterParameterGroupName, AutomatedSnapshotRetentionPeriod = AutomatedSnapshotRetentionPeriod, ManualSnapshotRetentionPeriod = ManualSnapshotRetentionPeriod, Port = Port, ClusterVersion = ClusterVersion, AllowVersionUpgrade = AllowVersionUpgrade, NumberOfNodes = NumberOfNodes, PubliclyAccessible = PubliclyAccessible, Encrypted = Encrypted, HsmClientCertificateIdentifier = HsmClientCertificateIdentifier, HsmConfigurationIdentifier = HsmConfigurationIdentifier, ElasticIp = ElasticIp, Tags = Tags, KmsKeyId = KmsKeyId, EnhancedVpcRouting = EnhancedVpcRouting, AdditionalInfo = AdditionalInfo, IamRoles = IamRoles, MaintenanceTrackName = MaintenanceTrackName, SnapshotScheduleIdentifier = SnapshotScheduleIdentifier, AvailabilityZoneRelocation = AvailabilityZoneRelocation)
  # Output shape used to deserialize the service response.
  response_shape <- .redshift$create_cluster_output()
  # Build a client from the ambient credentials/region config and send.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_params, response_shape))
}
.redshift$operations$create_cluster <- redshift_create_cluster
#' Creates an Amazon Redshift parameter group
#'
#' @description
#' Creates an Amazon Redshift parameter group.
#'
#' Creating parameter groups is independent of creating clusters. You can
#' associate a cluster with a parameter group when you create the cluster.
#' You can also associate an existing cluster with a parameter group after
#' the cluster is created by using
#' [`modify_cluster`][redshift_modify_cluster].
#'
#' Parameters in the parameter group define specific behavior that applies
#' to the databases you create on the cluster. For more information about
#' parameters and parameter groups, go to [Amazon Redshift Parameter
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_create_cluster_parameter_group(ParameterGroupName,
#' ParameterGroupFamily, Description, Tags)
#'
#' @param ParameterGroupName [required] The name of the cluster parameter group.
#'
#' Constraints:
#'
#' - Must be 1 to 255 alphanumeric characters or hyphens
#'
#' - First character must be a letter.
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens.
#'
#' - Must be unique within your AWS account.
#'
#' This value is stored as a lower-case string.
#' @param ParameterGroupFamily [required] The Amazon Redshift engine version to which the cluster parameter group
#' applies. The cluster engine version determines the set of parameters.
#'
#' To get a list of valid parameter group family names, you can call
#' [`describe_cluster_parameter_groups`][redshift_describe_cluster_parameter_groups].
#' By default, Amazon Redshift returns a list of all the parameter groups
#' that are owned by your AWS account, including the default parameter
#' groups for each Amazon Redshift engine version. The parameter group
#' family names associated with the default parameter groups provide you
#' the valid values. For example, a valid family name is "redshift-1.0".
#' @param Description [required] A description of the parameter group.
#' @param Tags A list of tag instances.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ClusterParameterGroup = list(
#' ParameterGroupName = "string",
#' ParameterGroupFamily = "string",
#' Description = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_cluster_parameter_group(
#' ParameterGroupName = "string",
#' ParameterGroupFamily = "string",
#' Description = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_cluster_parameter_group
redshift_create_cluster_parameter_group <- function(ParameterGroupName, ParameterGroupFamily, Description, Tags = NULL) {
  # Describe the CreateClusterParameterGroup API call (query protocol: POST to "/").
  operation <- new_operation(
    name = "CreateClusterParameterGroup",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied arguments into the request shape.
  request_params <- .redshift$create_cluster_parameter_group_input(ParameterGroupName = ParameterGroupName, ParameterGroupFamily = ParameterGroupFamily, Description = Description, Tags = Tags)
  # Output shape used to deserialize the service response.
  response_shape <- .redshift$create_cluster_parameter_group_output()
  # Build a client from the ambient credentials/region config and send.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_params, response_shape))
}
.redshift$operations$create_cluster_parameter_group <- redshift_create_cluster_parameter_group
#' Creates a new Amazon Redshift security group
#'
#' @description
#' Creates a new Amazon Redshift security group. You use security groups to
#' control access to non-VPC clusters.
#'
#' For information about managing security groups, go to [Amazon Redshift
#' Cluster Security
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_create_cluster_security_group(ClusterSecurityGroupName,
#' Description, Tags)
#'
#' @param ClusterSecurityGroupName [required] The name for the security group. Amazon Redshift stores the value as a
#' lowercase string.
#'
#' Constraints:
#'
#' - Must contain no more than 255 alphanumeric characters or hyphens.
#'
#' - Must not be "Default".
#'
#' - Must be unique for all security groups that are created by your AWS
#' account.
#'
#' Example: `examplesecuritygroup`
#' @param Description [required] A description for the security group.
#' @param Tags A list of tag instances.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ClusterSecurityGroup = list(
#' ClusterSecurityGroupName = "string",
#' Description = "string",
#' EC2SecurityGroups = list(
#' list(
#' Status = "string",
#' EC2SecurityGroupName = "string",
#' EC2SecurityGroupOwnerId = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ),
#' IPRanges = list(
#' list(
#' Status = "string",
#' CIDRIP = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ),
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_cluster_security_group(
#' ClusterSecurityGroupName = "string",
#' Description = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_cluster_security_group
redshift_create_cluster_security_group <- function(ClusterSecurityGroupName, Description, Tags = NULL) {
  # Describe the CreateClusterSecurityGroup API call (query protocol: POST to "/").
  operation <- new_operation(
    name = "CreateClusterSecurityGroup",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied arguments into the request shape.
  request_params <- .redshift$create_cluster_security_group_input(ClusterSecurityGroupName = ClusterSecurityGroupName, Description = Description, Tags = Tags)
  # Output shape used to deserialize the service response.
  response_shape <- .redshift$create_cluster_security_group_output()
  # Build a client from the ambient credentials/region config and send.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_params, response_shape))
}
.redshift$operations$create_cluster_security_group <- redshift_create_cluster_security_group
#' Creates a manual snapshot of the specified cluster
#'
#' @description
#' Creates a manual snapshot of the specified cluster. The cluster must be
#' in the `available` state.
#'
#' For more information about working with snapshots, go to [Amazon
#' Redshift
#' Snapshots](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_create_cluster_snapshot(SnapshotIdentifier, ClusterIdentifier,
#' ManualSnapshotRetentionPeriod, Tags)
#'
#' @param SnapshotIdentifier [required] A unique identifier for the snapshot that you are requesting. This
#' identifier must be unique for all snapshots within the AWS account.
#'
#' Constraints:
#'
#' - Cannot be null, empty, or blank
#'
#' - Must contain from 1 to 255 alphanumeric characters or hyphens
#'
#' - First character must be a letter
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens
#'
#' Example: `my-snapshot-id`
#' @param ClusterIdentifier [required] The cluster identifier for which you want a snapshot.
#' @param ManualSnapshotRetentionPeriod The number of days that a manual snapshot is retained. If the value is
#' -1, the manual snapshot is retained indefinitely.
#'
#' The value must be either -1 or an integer between 1 and 3,653.
#'
#' The default value is -1.
#' @param Tags A list of tag instances.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Snapshot = list(
#' SnapshotIdentifier = "string",
#' ClusterIdentifier = "string",
#' SnapshotCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Status = "string",
#' Port = 123,
#' AvailabilityZone = "string",
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' MasterUsername = "string",
#' ClusterVersion = "string",
#' EngineFullVersion = "string",
#' SnapshotType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' DBName = "string",
#' VpcId = "string",
#' Encrypted = TRUE|FALSE,
#' KmsKeyId = "string",
#' EncryptedWithHSM = TRUE|FALSE,
#' AccountsWithRestoreAccess = list(
#' list(
#' AccountId = "string",
#' AccountAlias = "string"
#' )
#' ),
#' OwnerAccount = "string",
#' TotalBackupSizeInMegaBytes = 123.0,
#' ActualIncrementalBackupSizeInMegaBytes = 123.0,
#' BackupProgressInMegaBytes = 123.0,
#' CurrentBackupRateInMegaBytesPerSecond = 123.0,
#' EstimatedSecondsToCompletion = 123,
#' ElapsedTimeInSeconds = 123,
#' SourceRegion = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' RestorableNodeTypes = list(
#' "string"
#' ),
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' ManualSnapshotRetentionPeriod = 123,
#' ManualSnapshotRemainingDays = 123,
#' SnapshotRetentionStartTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_cluster_snapshot(
#' SnapshotIdentifier = "string",
#' ClusterIdentifier = "string",
#' ManualSnapshotRetentionPeriod = 123,
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_cluster_snapshot
redshift_create_cluster_snapshot <- function(SnapshotIdentifier, ClusterIdentifier, ManualSnapshotRetentionPeriod = NULL, Tags = NULL) {
  # Describe the CreateClusterSnapshot API call (query protocol: POST to "/").
  operation <- new_operation(
    name = "CreateClusterSnapshot",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied arguments into the request shape.
  request_params <- .redshift$create_cluster_snapshot_input(SnapshotIdentifier = SnapshotIdentifier, ClusterIdentifier = ClusterIdentifier, ManualSnapshotRetentionPeriod = ManualSnapshotRetentionPeriod, Tags = Tags)
  # Output shape used to deserialize the service response.
  response_shape <- .redshift$create_cluster_snapshot_output()
  # Build a client from the ambient credentials/region config and send.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_params, response_shape))
}
.redshift$operations$create_cluster_snapshot <- redshift_create_cluster_snapshot
#' Creates a new Amazon Redshift subnet group
#'
#' @description
#' Creates a new Amazon Redshift subnet group. You must provide a list of
#' one or more subnets in your existing Amazon Virtual Private Cloud
#' (Amazon VPC) when creating an Amazon Redshift subnet group.
#'
#' For information about subnet groups, go to [Amazon Redshift Cluster
#' Subnet
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-cluster-subnet-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_create_cluster_subnet_group(ClusterSubnetGroupName,
#' Description, SubnetIds, Tags)
#'
#' @param ClusterSubnetGroupName [required] The name for the subnet group. Amazon Redshift stores the value as a
#' lowercase string.
#'
#' Constraints:
#'
#' - Must contain no more than 255 alphanumeric characters or hyphens.
#'
#' - Must not be "Default".
#'
#' - Must be unique for all subnet groups that are created by your AWS
#' account.
#'
#' Example: `examplesubnetgroup`
#' @param Description [required] A description for the subnet group.
#' @param SubnetIds [required] An array of VPC subnet IDs. A maximum of 20 subnets can be modified in a
#' single request.
#' @param Tags A list of tag instances.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ClusterSubnetGroup = list(
#' ClusterSubnetGroupName = "string",
#' Description = "string",
#' VpcId = "string",
#' SubnetGroupStatus = "string",
#' Subnets = list(
#' list(
#' SubnetIdentifier = "string",
#' SubnetAvailabilityZone = list(
#' Name = "string",
#' SupportedPlatforms = list(
#' list(
#' Name = "string"
#' )
#' )
#' ),
#' SubnetStatus = "string"
#' )
#' ),
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_cluster_subnet_group(
#' ClusterSubnetGroupName = "string",
#' Description = "string",
#' SubnetIds = list(
#' "string"
#' ),
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_cluster_subnet_group
redshift_create_cluster_subnet_group <- function(ClusterSubnetGroupName, Description, SubnetIds, Tags = NULL) {
  # Describe the CreateClusterSubnetGroup API call (query protocol: POST to "/").
  operation <- new_operation(
    name = "CreateClusterSubnetGroup",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied arguments into the request shape.
  request_params <- .redshift$create_cluster_subnet_group_input(ClusterSubnetGroupName = ClusterSubnetGroupName, Description = Description, SubnetIds = SubnetIds, Tags = Tags)
  # Output shape used to deserialize the service response.
  response_shape <- .redshift$create_cluster_subnet_group_output()
  # Build a client from the ambient credentials/region config and send.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_params, response_shape))
}
.redshift$operations$create_cluster_subnet_group <- redshift_create_cluster_subnet_group
#' Creates an Amazon Redshift event notification subscription
#'
#' @description
#' Creates an Amazon Redshift event notification subscription. This action
#' requires an ARN (Amazon Resource Name) of an Amazon SNS topic created by
#' either the Amazon Redshift console, the Amazon SNS console, or the
#' Amazon SNS API. To obtain an ARN with Amazon SNS, you must create a
#' topic in Amazon SNS and subscribe to the topic. The ARN is displayed in
#' the SNS console.
#'
#' You can specify the source type, and lists of Amazon Redshift source
#' IDs, event categories, and event severities. Notifications will be sent
#' for all events you want that match those criteria. For example, you can
#' specify source type = cluster, source ID = my-cluster-1 and mycluster2,
#' event categories = Availability, Backup, and severity = ERROR. The
#' subscription will only send notifications for those ERROR events in the
#' Availability and Backup categories for the specified clusters.
#'
#' If you specify both the source type and source IDs, such as source type
#' = cluster and source identifier = my-cluster-1, notifications will be
#' sent for all the cluster events for my-cluster-1. If you specify a
#' source type but do not specify a source identifier, you will receive
#' notice of the events for the objects of that type in your AWS account.
#' If you do not specify either the SourceType or the SourceIdentifier,
#' you will be notified of events generated from all Amazon Redshift
#' sources belonging to your AWS account. You must specify a source type if
#' you specify a source ID.
#'
#' @usage
#' redshift_create_event_subscription(SubscriptionName, SnsTopicArn,
#' SourceType, SourceIds, EventCategories, Severity, Enabled, Tags)
#'
#' @param SubscriptionName [required] The name of the event subscription to be created.
#'
#' Constraints:
#'
#' - Cannot be null, empty, or blank.
#'
#' - Must contain from 1 to 255 alphanumeric characters or hyphens.
#'
#' - First character must be a letter.
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens.
#' @param SnsTopicArn [required] The Amazon Resource Name (ARN) of the Amazon SNS topic used to transmit
#' the event notifications. The ARN is created by Amazon SNS when you
#' create a topic and subscribe to it.
#' @param SourceType The type of source that will be generating the events. For example, if
#' you want to be notified of events generated by a cluster, you would set
#' this parameter to cluster. If this value is not specified, events are
#' returned for all Amazon Redshift objects in your AWS account. You must
#' specify a source type in order to specify source IDs.
#'
#' Valid values: cluster, cluster-parameter-group, cluster-security-group,
#' cluster-snapshot, and scheduled-action.
#' @param SourceIds A list of one or more identifiers of Amazon Redshift source objects. All
#' of the objects must be of the same type as was specified in the source
#' type parameter. The event subscription will return only events generated
#' by the specified objects. If not specified, then events are returned for
#' all objects within the source type specified.
#'
#' Example: my-cluster-1, my-cluster-2
#'
#' Example: my-snapshot-20131010
#' @param EventCategories Specifies the Amazon Redshift event categories to be published by the
#' event notification subscription.
#'
#' Values: configuration, management, monitoring, security
#' @param Severity Specifies the Amazon Redshift event severity to be published by the
#' event notification subscription.
#'
#' Values: ERROR, INFO
#' @param Enabled A boolean value; set to `true` to activate the subscription, and set to
#' `false` to create the subscription but not activate it.
#' @param Tags A list of tag instances.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' EventSubscription = list(
#' CustomerAwsId = "string",
#' CustSubscriptionId = "string",
#' SnsTopicArn = "string",
#' Status = "string",
#' SubscriptionCreationTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' SourceType = "string",
#' SourceIdsList = list(
#' "string"
#' ),
#' EventCategoriesList = list(
#' "string"
#' ),
#' Severity = "string",
#' Enabled = TRUE|FALSE,
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_event_subscription(
#' SubscriptionName = "string",
#' SnsTopicArn = "string",
#' SourceType = "string",
#' SourceIds = list(
#' "string"
#' ),
#' EventCategories = list(
#' "string"
#' ),
#' Severity = "string",
#' Enabled = TRUE|FALSE,
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_event_subscription
redshift_create_event_subscription <- function(SubscriptionName, SnsTopicArn, SourceType = NULL, SourceIds = NULL, EventCategories = NULL, Severity = NULL, Enabled = NULL, Tags = NULL) {
  # Describe the CreateEventSubscription call: a plain POST of query
  # parameters to the service root, with no pagination.
  operation <- new_operation(
    name = "CreateEventSubscription",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied arguments into the operation's input shape.
  req_input <- .redshift$create_event_subscription_input(
    SubscriptionName = SubscriptionName,
    SnsTopicArn = SnsTopicArn,
    SourceType = SourceType,
    SourceIds = SourceIds,
    EventCategories = EventCategories,
    Severity = Severity,
    Enabled = Enabled,
    Tags = Tags
  )
  resp_shape <- .redshift$create_event_subscription_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, req_input, resp_shape))
}
.redshift$operations$create_event_subscription <- redshift_create_event_subscription
#' Creates an HSM client certificate that an Amazon Redshift cluster will
#' use to connect to the client's HSM in order to store and retrieve the
#' keys used to encrypt the cluster databases
#'
#' @description
#' Creates an HSM client certificate that an Amazon Redshift cluster will
#' use to connect to the client's HSM in order to store and retrieve the
#' keys used to encrypt the cluster databases.
#'
#' The command returns a public key, which you must store in the HSM. In
#' addition to creating the HSM certificate, you must create an Amazon
#' Redshift HSM configuration that provides a cluster the information
#' needed to store and use encryption keys in the HSM. For more
#' information, go to [Hardware Security
#' Modules](https://docs.aws.amazon.com/redshift/latest/mgmt/) in the
#' Amazon Redshift Cluster Management Guide.
#'
#' @usage
#' redshift_create_hsm_client_certificate(HsmClientCertificateIdentifier,
#' Tags)
#'
#' @param HsmClientCertificateIdentifier [required] The identifier to be assigned to the new HSM client certificate that the
#' cluster will use to connect to the HSM to use the database encryption
#' keys.
#' @param Tags A list of tag instances.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' HsmClientCertificate = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmClientCertificatePublicKey = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_hsm_client_certificate(
#' HsmClientCertificateIdentifier = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_hsm_client_certificate
redshift_create_hsm_client_certificate <- function(HsmClientCertificateIdentifier, Tags = NULL) {
  # Describe the CreateHsmClientCertificate call (POST to "/", no paginator).
  operation <- new_operation(
    name = "CreateHsmClientCertificate",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize arguments into the request structure.
  req_input <- .redshift$create_hsm_client_certificate_input(
    HsmClientCertificateIdentifier = HsmClientCertificateIdentifier,
    Tags = Tags
  )
  resp_shape <- .redshift$create_hsm_client_certificate_output()
  # Resolve the client from the current config and send the request.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, req_input, resp_shape))
}
.redshift$operations$create_hsm_client_certificate <- redshift_create_hsm_client_certificate
#' Creates an HSM configuration that contains the information required by
#' an Amazon Redshift cluster to store and use database encryption keys in
#' a Hardware Security Module (HSM)
#'
#' @description
#' Creates an HSM configuration that contains the information required by
#' an Amazon Redshift cluster to store and use database encryption keys in
#' a Hardware Security Module (HSM). After creating the HSM configuration,
#' you can specify it as a parameter when creating a cluster. The cluster
#' will then store its encryption keys in the HSM.
#'
#' In addition to creating an HSM configuration, you must also create an
#' HSM client certificate. For more information, go to [Hardware Security
#' Modules](https://docs.aws.amazon.com/redshift/latest/mgmt/) in the
#' Amazon Redshift Cluster Management Guide.
#'
#' @usage
#' redshift_create_hsm_configuration(HsmConfigurationIdentifier,
#' Description, HsmIpAddress, HsmPartitionName, HsmPartitionPassword,
#' HsmServerPublicCertificate, Tags)
#'
#' @param HsmConfigurationIdentifier [required] The identifier to be assigned to the new Amazon Redshift HSM
#' configuration.
#' @param Description [required] A text description of the HSM configuration to be created.
#' @param HsmIpAddress [required] The IP address that the Amazon Redshift cluster must use to access the
#' HSM.
#' @param HsmPartitionName [required] The name of the partition in the HSM where the Amazon Redshift clusters
#' will store their database encryption keys.
#' @param HsmPartitionPassword [required] The password required to access the HSM partition.
#' @param HsmServerPublicCertificate [required] The HSMs public certificate file. When using Cloud HSM, the file name is
#' server.pem.
#' @param Tags A list of tag instances.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' HsmConfiguration = list(
#' HsmConfigurationIdentifier = "string",
#' Description = "string",
#' HsmIpAddress = "string",
#' HsmPartitionName = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_hsm_configuration(
#' HsmConfigurationIdentifier = "string",
#' Description = "string",
#' HsmIpAddress = "string",
#' HsmPartitionName = "string",
#' HsmPartitionPassword = "string",
#' HsmServerPublicCertificate = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_hsm_configuration
redshift_create_hsm_configuration <- function(HsmConfigurationIdentifier, Description, HsmIpAddress, HsmPartitionName, HsmPartitionPassword, HsmServerPublicCertificate, Tags = NULL) {
  # Describe the CreateHsmConfiguration call (POST to "/", no paginator).
  operation <- new_operation(
    name = "CreateHsmConfiguration",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the HSM configuration fields into the request structure.
  req_input <- .redshift$create_hsm_configuration_input(
    HsmConfigurationIdentifier = HsmConfigurationIdentifier,
    Description = Description,
    HsmIpAddress = HsmIpAddress,
    HsmPartitionName = HsmPartitionName,
    HsmPartitionPassword = HsmPartitionPassword,
    HsmServerPublicCertificate = HsmServerPublicCertificate,
    Tags = Tags
  )
  resp_shape <- .redshift$create_hsm_configuration_output()
  # Build a client from the active configuration and issue the request.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, req_input, resp_shape))
}
.redshift$operations$create_hsm_configuration <- redshift_create_hsm_configuration
#' Creates a scheduled action
#'
#' @description
#' Creates a scheduled action. A scheduled action contains a schedule and
#' an Amazon Redshift API action. For example, you can create a schedule of
#' when to run the [`resize_cluster`][redshift_resize_cluster] API
#' operation.
#'
#' @usage
#' redshift_create_scheduled_action(ScheduledActionName, TargetAction,
#' Schedule, IamRole, ScheduledActionDescription, StartTime, EndTime,
#' Enable)
#'
#' @param ScheduledActionName [required] The name of the scheduled action. The name must be unique within an
#' account. For more information about this parameter, see ScheduledAction.
#' @param TargetAction [required] A JSON format string of the Amazon Redshift API operation with input
#' parameters. For more information about this parameter, see
#' ScheduledAction.
#' @param Schedule [required] The schedule in `at( )` or `cron( )` format. For more information about
#' this parameter, see ScheduledAction.
#' @param IamRole [required] The IAM role to assume to run the target action. For more information
#' about this parameter, see ScheduledAction.
#' @param ScheduledActionDescription The description of the scheduled action.
#' @param StartTime The start time in UTC of the scheduled action. Before this time, the
#' scheduled action does not trigger. For more information about this
#' parameter, see ScheduledAction.
#' @param EndTime The end time in UTC of the scheduled action. After this time, the
#' scheduled action does not trigger. For more information about this
#' parameter, see ScheduledAction.
#' @param Enable If true, the schedule is enabled. If false, the scheduled action does
#' not trigger. For more information about `state` of the scheduled action,
#' see ScheduledAction.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ScheduledActionName = "string",
#' TargetAction = list(
#' ResizeCluster = list(
#' ClusterIdentifier = "string",
#' ClusterType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' Classic = TRUE|FALSE
#' ),
#' PauseCluster = list(
#' ClusterIdentifier = "string"
#' ),
#' ResumeCluster = list(
#' ClusterIdentifier = "string"
#' )
#' ),
#' Schedule = "string",
#' IamRole = "string",
#' ScheduledActionDescription = "string",
#' State = "ACTIVE"|"DISABLED",
#' NextInvocations = list(
#' as.POSIXct(
#' "2015-01-01"
#' )
#' ),
#' StartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' EndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_scheduled_action(
#' ScheduledActionName = "string",
#' TargetAction = list(
#' ResizeCluster = list(
#' ClusterIdentifier = "string",
#' ClusterType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' Classic = TRUE|FALSE
#' ),
#' PauseCluster = list(
#' ClusterIdentifier = "string"
#' ),
#' ResumeCluster = list(
#' ClusterIdentifier = "string"
#' )
#' ),
#' Schedule = "string",
#' IamRole = "string",
#' ScheduledActionDescription = "string",
#' StartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' EndTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Enable = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_scheduled_action
redshift_create_scheduled_action <- function(ScheduledActionName, TargetAction, Schedule, IamRole, ScheduledActionDescription = NULL, StartTime = NULL, EndTime = NULL, Enable = NULL) {
  # Describe the CreateScheduledAction call (POST to "/", no paginator).
  operation <- new_operation(
    name = "CreateScheduledAction",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Fold the schedule definition and target action into the input shape.
  req_input <- .redshift$create_scheduled_action_input(
    ScheduledActionName = ScheduledActionName,
    TargetAction = TargetAction,
    Schedule = Schedule,
    IamRole = IamRole,
    ScheduledActionDescription = ScheduledActionDescription,
    StartTime = StartTime,
    EndTime = EndTime,
    Enable = Enable
  )
  resp_shape <- .redshift$create_scheduled_action_output()
  # Resolve a client from the current configuration and dispatch.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, req_input, resp_shape))
}
.redshift$operations$create_scheduled_action <- redshift_create_scheduled_action
#' Creates a snapshot copy grant that permits Amazon Redshift to use a
#' customer master key (CMK) from AWS Key Management Service (AWS KMS) to
#' encrypt copied snapshots in a destination region
#'
#' @description
#' Creates a snapshot copy grant that permits Amazon Redshift to use a
#' customer master key (CMK) from AWS Key Management Service (AWS KMS) to
#' encrypt copied snapshots in a destination region.
#'
#' For more information about managing snapshot copy grants, go to [Amazon
#' Redshift Database
#' Encryption](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_create_snapshot_copy_grant(SnapshotCopyGrantName, KmsKeyId,
#' Tags)
#'
#' @param SnapshotCopyGrantName [required] The name of the snapshot copy grant. This name must be unique in the
#' region for the AWS account.
#'
#' Constraints:
#'
#' - Must contain from 1 to 63 alphanumeric characters or hyphens.
#'
#' - Alphabetic characters must be lowercase.
#'
#' - First character must be a letter.
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens.
#'
#' - Must be unique for all clusters within an AWS account.
#' @param KmsKeyId The unique identifier of the customer master key (CMK) to which to grant
#' Amazon Redshift permission. If no key is specified, the default key is
#' used.
#' @param Tags A list of tag instances.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' SnapshotCopyGrant = list(
#' SnapshotCopyGrantName = "string",
#' KmsKeyId = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_snapshot_copy_grant(
#' SnapshotCopyGrantName = "string",
#' KmsKeyId = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_snapshot_copy_grant
redshift_create_snapshot_copy_grant <- function(SnapshotCopyGrantName, KmsKeyId = NULL, Tags = NULL) {
  # Describe the CreateSnapshotCopyGrant call (POST to "/", no paginator).
  operation <- new_operation(
    name = "CreateSnapshotCopyGrant",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the grant name, optional KMS key, and tags into the input.
  req_input <- .redshift$create_snapshot_copy_grant_input(
    SnapshotCopyGrantName = SnapshotCopyGrantName,
    KmsKeyId = KmsKeyId,
    Tags = Tags
  )
  resp_shape <- .redshift$create_snapshot_copy_grant_output()
  # Build a client from the active configuration and send the request.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, req_input, resp_shape))
}
.redshift$operations$create_snapshot_copy_grant <- redshift_create_snapshot_copy_grant
#' Create a snapshot schedule that can be associated to a cluster and which
#' overrides the default system backup schedule
#'
#' @description
#' Create a snapshot schedule that can be associated to a cluster and which
#' overrides the default system backup schedule.
#'
#' @usage
#' redshift_create_snapshot_schedule(ScheduleDefinitions,
#' ScheduleIdentifier, ScheduleDescription, Tags, DryRun, NextInvocations)
#'
#' @param ScheduleDefinitions The definition of the snapshot schedule. The definition is made up of
#' schedule expressions, for example "cron(30 12 *)" or "rate(12 hours)".
#' @param ScheduleIdentifier A unique identifier for a snapshot schedule. Only alphanumeric
#' characters are allowed for the identifier.
#' @param ScheduleDescription The description of the snapshot schedule.
#' @param Tags An optional set of tags you can use to search for the schedule.
#' @param DryRun A boolean value; when `TRUE`, the request is validated
#' without creating the snapshot schedule (this parameter is not documented
#' in the AWS API reference — confirm behavior before relying on it).
#' @param NextInvocations An integer limiting how many upcoming schedule
#' invocation times are returned (not documented in the AWS API reference —
#' confirm behavior before relying on it).
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ScheduleDefinitions = list(
#' "string"
#' ),
#' ScheduleIdentifier = "string",
#' ScheduleDescription = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' NextInvocations = list(
#' as.POSIXct(
#' "2015-01-01"
#' )
#' ),
#' AssociatedClusterCount = 123,
#' AssociatedClusters = list(
#' list(
#' ClusterIdentifier = "string",
#' ScheduleAssociationState = "MODIFYING"|"ACTIVE"|"FAILED"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_snapshot_schedule(
#' ScheduleDefinitions = list(
#' "string"
#' ),
#' ScheduleIdentifier = "string",
#' ScheduleDescription = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' DryRun = TRUE|FALSE,
#' NextInvocations = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_snapshot_schedule
redshift_create_snapshot_schedule <- function(ScheduleDefinitions = NULL, ScheduleIdentifier = NULL, ScheduleDescription = NULL, Tags = NULL, DryRun = NULL, NextInvocations = NULL) {
  # Describe the CreateSnapshotSchedule call (POST to "/", no paginator).
  operation <- new_operation(
    name = "CreateSnapshotSchedule",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # All parameters are optional; marshal whatever the caller supplied.
  req_input <- .redshift$create_snapshot_schedule_input(
    ScheduleDefinitions = ScheduleDefinitions,
    ScheduleIdentifier = ScheduleIdentifier,
    ScheduleDescription = ScheduleDescription,
    Tags = Tags,
    DryRun = DryRun,
    NextInvocations = NextInvocations
  )
  resp_shape <- .redshift$create_snapshot_schedule_output()
  # Resolve a client from the current configuration and dispatch.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, req_input, resp_shape))
}
.redshift$operations$create_snapshot_schedule <- redshift_create_snapshot_schedule
#' Adds tags to a cluster
#'
#' @description
#' Adds tags to a cluster.
#'
#' A resource can have up to 50 tags. If you try to create more than 50
#' tags for a resource, you will receive an error and the attempt will
#' fail.
#'
#' If you specify a key that already exists for the resource, the value for
#' that key will be updated with the new value.
#'
#' @usage
#' redshift_create_tags(ResourceName, Tags)
#'
#' @param ResourceName [required] The Amazon Resource Name (ARN) to which you want to add the tag or tags.
#' For example, `arn:aws:redshift:us-east-2:123456789:cluster:t1`.
#' @param Tags [required] One or more name/value pairs to add as tags to the specified resource.
#' Each tag name is passed in with the parameter `Key` and the
#' corresponding value is passed in with the parameter `Value`. The `Key`
#' and `Value` parameters are separated by a comma (,). Separate multiple
#' tags with a space. For example,
#' `--tags "Key"="owner","Value"="admin" "Key"="environment","Value"="test" "Key"="version","Value"="1.0"`.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$create_tags(
#' ResourceName = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_tags
redshift_create_tags <- function(ResourceName, Tags) {
  # Describe the CreateTags call (POST to "/", no paginator).
  operation <- new_operation(
    name = "CreateTags",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Both arguments are required by the API; marshal them into the input.
  req_input <- .redshift$create_tags_input(
    ResourceName = ResourceName,
    Tags = Tags
  )
  resp_shape <- .redshift$create_tags_output()
  # Build a client from the active configuration and issue the request.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, req_input, resp_shape))
}
.redshift$operations$create_tags <- redshift_create_tags
#' Creates a usage limit for a specified Amazon Redshift feature on a
#' cluster
#'
#' @description
#' Creates a usage limit for a specified Amazon Redshift feature on a
#' cluster. The usage limit is identified by the returned usage limit
#' identifier.
#'
#' @usage
#' redshift_create_usage_limit(ClusterIdentifier, FeatureType, LimitType,
#' Amount, Period, BreachAction, Tags)
#'
#' @param ClusterIdentifier [required] The identifier of the cluster that you want to limit usage.
#' @param FeatureType [required] The Amazon Redshift feature that you want to limit.
#' @param LimitType [required] The type of limit. Depending on the feature type, this can be based on a
#' time duration or data size. If `FeatureType` is `spectrum`, then
#' `LimitType` must be `data-scanned`. If `FeatureType` is
#' `concurrency-scaling`, then `LimitType` must be `time`.
#' @param Amount [required] The limit amount. If time-based, this amount is in minutes. If
#' data-based, this amount is in terabytes (TB). The value must be a
#' positive number.
#' @param Period The time period that the amount applies to. A `weekly` period begins on
#' Sunday. The default is `monthly`.
#' @param BreachAction The action that Amazon Redshift takes when the limit is reached. The
#' default is log. For more information about this parameter, see
#' UsageLimit.
#' @param Tags A list of tag instances.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' UsageLimitId = "string",
#' ClusterIdentifier = "string",
#' FeatureType = "spectrum"|"concurrency-scaling",
#' LimitType = "time"|"data-scanned",
#' Amount = 123,
#' Period = "daily"|"weekly"|"monthly",
#' BreachAction = "log"|"emit-metric"|"disable",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_usage_limit(
#' ClusterIdentifier = "string",
#' FeatureType = "spectrum"|"concurrency-scaling",
#' LimitType = "time"|"data-scanned",
#' Amount = 123,
#' Period = "daily"|"weekly"|"monthly",
#' BreachAction = "log"|"emit-metric"|"disable",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_create_usage_limit
redshift_create_usage_limit <- function(ClusterIdentifier, FeatureType, LimitType, Amount, Period = NULL, BreachAction = NULL, Tags = NULL) {
  # Describe the CreateUsageLimit call (POST to "/", no paginator).
  operation <- new_operation(
    name = "CreateUsageLimit",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the usage-limit definition into the request structure.
  req_input <- .redshift$create_usage_limit_input(
    ClusterIdentifier = ClusterIdentifier,
    FeatureType = FeatureType,
    LimitType = LimitType,
    Amount = Amount,
    Period = Period,
    BreachAction = BreachAction,
    Tags = Tags
  )
  resp_shape <- .redshift$create_usage_limit_output()
  # Resolve a client from the current configuration and dispatch.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, req_input, resp_shape))
}
.redshift$operations$create_usage_limit <- redshift_create_usage_limit
#' Deletes a previously provisioned cluster without its final snapshot
#' being created
#'
#' @description
#' Deletes a previously provisioned cluster without its final snapshot
#' being created. A successful response from the web service indicates that
#' the request was received correctly. Use
#' [`describe_clusters`][redshift_describe_clusters] to monitor the status
#' of the deletion. The delete operation cannot be canceled or reverted
#' once submitted. For more information about managing clusters, go to
#' [Amazon Redshift
#' Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' If you want to shut down the cluster and retain it for future use, set
#' *SkipFinalClusterSnapshot* to `false` and specify a name for
#' *FinalClusterSnapshotIdentifier*. You can later restore this snapshot to
#' resume using the cluster. If a final cluster snapshot is requested, the
#' status of the cluster will be "final-snapshot" while the snapshot is
#' being taken, then it's "deleting" once Amazon Redshift begins deleting
#' the cluster.
#'
#' For more information about managing clusters, go to [Amazon Redshift
#' Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_delete_cluster(ClusterIdentifier, SkipFinalClusterSnapshot,
#' FinalClusterSnapshotIdentifier, FinalClusterSnapshotRetentionPeriod)
#'
#' @param ClusterIdentifier [required] The identifier of the cluster to be deleted.
#'
#' Constraints:
#'
#' - Must contain lowercase characters.
#'
#' - Must contain from 1 to 63 alphanumeric characters or hyphens.
#'
#' - First character must be a letter.
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens.
#' @param SkipFinalClusterSnapshot Determines whether a final snapshot of the cluster is created before
#' Amazon Redshift deletes the cluster. If `true`, a final cluster snapshot
#' is not created. If `false`, a final cluster snapshot is created before
#' the cluster is deleted.
#'
#' The *FinalClusterSnapshotIdentifier* parameter must be specified if
#' *SkipFinalClusterSnapshot* is `false`.
#'
#' Default: `false`
#' @param FinalClusterSnapshotIdentifier The identifier of the final snapshot that is to be created immediately
#' before deleting the cluster. If this parameter is provided,
#' *SkipFinalClusterSnapshot* must be `false`.
#'
#' Constraints:
#'
#' - Must be 1 to 255 alphanumeric characters.
#'
#' - First character must be a letter.
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens.
#' @param FinalClusterSnapshotRetentionPeriod The number of days that a manual snapshot is retained. If the value is
#' -1, the manual snapshot is retained indefinitely.
#'
#' The value must be either -1 or an integer between 1 and 3,653.
#'
#' The default value is -1.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$delete_cluster(
#' ClusterIdentifier = "string",
#' SkipFinalClusterSnapshot = TRUE|FALSE,
#' FinalClusterSnapshotIdentifier = "string",
#' FinalClusterSnapshotRetentionPeriod = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_cluster
redshift_delete_cluster <- function(ClusterIdentifier, SkipFinalClusterSnapshot = NULL, FinalClusterSnapshotIdentifier = NULL, FinalClusterSnapshotRetentionPeriod = NULL) {
  # Describe the DeleteCluster call (POST to "/", no paginator).
  operation <- new_operation(
    name = "DeleteCluster",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the cluster identifier and final-snapshot options into the input.
  req_input <- .redshift$delete_cluster_input(
    ClusterIdentifier = ClusterIdentifier,
    SkipFinalClusterSnapshot = SkipFinalClusterSnapshot,
    FinalClusterSnapshotIdentifier = FinalClusterSnapshotIdentifier,
    FinalClusterSnapshotRetentionPeriod = FinalClusterSnapshotRetentionPeriod
  )
  resp_shape <- .redshift$delete_cluster_output()
  # Build a client from the active configuration and send the request.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, req_input, resp_shape))
}
.redshift$operations$delete_cluster <- redshift_delete_cluster
#' Deletes a specified Amazon Redshift parameter group
#'
#' @description
#' Deletes a specified Amazon Redshift parameter group.
#'
#' You cannot delete a parameter group if it is associated with a cluster.
#'
#' @usage
#' redshift_delete_cluster_parameter_group(ParameterGroupName)
#'
#' @param ParameterGroupName [required] The name of the parameter group to be deleted.
#'
#' Constraints:
#'
#' - Must be the name of an existing cluster parameter group.
#'
#' - Cannot delete a default cluster parameter group.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_cluster_parameter_group(
#' ParameterGroupName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_cluster_parameter_group
redshift_delete_cluster_parameter_group <- function(ParameterGroupName) {
  # Describe the DeleteClusterParameterGroup call (POST to "/", no paginator).
  operation <- new_operation(
    name = "DeleteClusterParameterGroup",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # The only input is the parameter group name.
  req_input <- .redshift$delete_cluster_parameter_group_input(
    ParameterGroupName = ParameterGroupName
  )
  resp_shape <- .redshift$delete_cluster_parameter_group_output()
  # Resolve a client from the current configuration and dispatch.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, req_input, resp_shape))
}
.redshift$operations$delete_cluster_parameter_group <- redshift_delete_cluster_parameter_group
#' Deletes an Amazon Redshift security group
#'
#' @description
#' Deletes an Amazon Redshift security group.
#'
#' You cannot delete a security group that is associated with any clusters.
#' You cannot delete the default security group.
#'
#' For information about managing security groups, go to [Amazon Redshift
#' Cluster Security
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_delete_cluster_security_group(ClusterSecurityGroupName)
#'
#' @param ClusterSecurityGroupName [required] The name of the cluster security group to be deleted.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_cluster_security_group(
#' ClusterSecurityGroupName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_cluster_security_group
redshift_delete_cluster_security_group <- function(ClusterSecurityGroupName) {
  # Describe the DeleteClusterSecurityGroup call (POST to "/", no paginator).
  operation <- new_operation(
    name = "DeleteClusterSecurityGroup",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # The only input is the security group name.
  req_input <- .redshift$delete_cluster_security_group_input(
    ClusterSecurityGroupName = ClusterSecurityGroupName
  )
  resp_shape <- .redshift$delete_cluster_security_group_output()
  # Build a client from the active configuration and issue the request.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, req_input, resp_shape))
}
.redshift$operations$delete_cluster_security_group <- redshift_delete_cluster_security_group
#' Deletes the specified manual snapshot
#'
#' @description
#' Deletes the specified manual snapshot. The snapshot must be in the
#' `available` state, with no other users authorized to access the
#' snapshot.
#'
#' Unlike automated snapshots, manual snapshots are retained even after you
#' delete your cluster. Amazon Redshift does not delete your manual
#' snapshots. You must delete manual snapshots explicitly to avoid getting
#' charged. If other accounts are authorized to access the snapshot, you
#' must revoke all of the authorizations before you can delete the
#' snapshot.
#'
#' @usage
#' redshift_delete_cluster_snapshot(SnapshotIdentifier,
#' SnapshotClusterIdentifier)
#'
#' @param SnapshotIdentifier [required] The unique identifier of the manual snapshot to be deleted.
#'
#' Constraints: Must be the name of an existing snapshot that is in the
#' `available`, `failed`, or `cancelled` state.
#' @param SnapshotClusterIdentifier The unique identifier of the cluster the snapshot was created from. This
#' parameter is required if your IAM user has a policy containing a
#' snapshot resource element that specifies anything other than * for the
#' cluster name.
#'
#' Constraints: Must be the name of valid cluster.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Snapshot = list(
#' SnapshotIdentifier = "string",
#' ClusterIdentifier = "string",
#' SnapshotCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Status = "string",
#' Port = 123,
#' AvailabilityZone = "string",
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' MasterUsername = "string",
#' ClusterVersion = "string",
#' EngineFullVersion = "string",
#' SnapshotType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' DBName = "string",
#' VpcId = "string",
#' Encrypted = TRUE|FALSE,
#' KmsKeyId = "string",
#' EncryptedWithHSM = TRUE|FALSE,
#' AccountsWithRestoreAccess = list(
#' list(
#' AccountId = "string",
#' AccountAlias = "string"
#' )
#' ),
#' OwnerAccount = "string",
#' TotalBackupSizeInMegaBytes = 123.0,
#' ActualIncrementalBackupSizeInMegaBytes = 123.0,
#' BackupProgressInMegaBytes = 123.0,
#' CurrentBackupRateInMegaBytesPerSecond = 123.0,
#' EstimatedSecondsToCompletion = 123,
#' ElapsedTimeInSeconds = 123,
#' SourceRegion = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' RestorableNodeTypes = list(
#' "string"
#' ),
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' ManualSnapshotRetentionPeriod = 123,
#' ManualSnapshotRemainingDays = 123,
#' SnapshotRetentionStartTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$delete_cluster_snapshot(
#' SnapshotIdentifier = "string",
#' SnapshotClusterIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_cluster_snapshot
redshift_delete_cluster_snapshot <- function(SnapshotIdentifier, SnapshotClusterIdentifier = NULL) {
  # Describe the DeleteClusterSnapshot API operation.
  operation <- new_operation(
    name = "DeleteClusterSnapshot",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  op_input <- .redshift$delete_cluster_snapshot_input(
    SnapshotIdentifier = SnapshotIdentifier,
    SnapshotClusterIdentifier = SnapshotClusterIdentifier
  )
  op_output <- .redshift$delete_cluster_snapshot_output()
  # Resolve the client configuration, then build and dispatch the request.
  svc <- .redshift$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.redshift$operations$delete_cluster_snapshot <- redshift_delete_cluster_snapshot
#' Deletes the specified cluster subnet group
#'
#' @description
#' Deletes the specified cluster subnet group.
#'
#' @usage
#' redshift_delete_cluster_subnet_group(ClusterSubnetGroupName)
#'
#' @param ClusterSubnetGroupName [required] The name of the cluster subnet group name to be deleted.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_cluster_subnet_group(
#' ClusterSubnetGroupName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_cluster_subnet_group
redshift_delete_cluster_subnet_group <- function(ClusterSubnetGroupName) {
  # Describe the DeleteClusterSubnetGroup API operation.
  operation <- new_operation(
    name = "DeleteClusterSubnetGroup",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  op_input <- .redshift$delete_cluster_subnet_group_input(
    ClusterSubnetGroupName = ClusterSubnetGroupName
  )
  op_output <- .redshift$delete_cluster_subnet_group_output()
  # Resolve the client configuration, then build and dispatch the request.
  svc <- .redshift$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.redshift$operations$delete_cluster_subnet_group <- redshift_delete_cluster_subnet_group
#' Deletes an Amazon Redshift event notification subscription
#'
#' @description
#' Deletes an Amazon Redshift event notification subscription.
#'
#' @usage
#' redshift_delete_event_subscription(SubscriptionName)
#'
#' @param SubscriptionName [required] The name of the Amazon Redshift event notification subscription to be
#' deleted.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_event_subscription(
#' SubscriptionName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_event_subscription
redshift_delete_event_subscription <- function(SubscriptionName) {
  # Describe the DeleteEventSubscription API operation.
  operation <- new_operation(
    name = "DeleteEventSubscription",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  op_input <- .redshift$delete_event_subscription_input(
    SubscriptionName = SubscriptionName
  )
  op_output <- .redshift$delete_event_subscription_output()
  # Resolve the client configuration, then build and dispatch the request.
  svc <- .redshift$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.redshift$operations$delete_event_subscription <- redshift_delete_event_subscription
#' Deletes the specified HSM client certificate
#'
#' @description
#' Deletes the specified HSM client certificate.
#'
#' @usage
#' redshift_delete_hsm_client_certificate(HsmClientCertificateIdentifier)
#'
#' @param HsmClientCertificateIdentifier [required] The identifier of the HSM client certificate to be deleted.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_hsm_client_certificate(
#' HsmClientCertificateIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_hsm_client_certificate
redshift_delete_hsm_client_certificate <- function(HsmClientCertificateIdentifier) {
  # Describe the DeleteHsmClientCertificate API operation.
  operation <- new_operation(
    name = "DeleteHsmClientCertificate",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  op_input <- .redshift$delete_hsm_client_certificate_input(
    HsmClientCertificateIdentifier = HsmClientCertificateIdentifier
  )
  op_output <- .redshift$delete_hsm_client_certificate_output()
  # Resolve the client configuration, then build and dispatch the request.
  svc <- .redshift$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.redshift$operations$delete_hsm_client_certificate <- redshift_delete_hsm_client_certificate
#' Deletes the specified Amazon Redshift HSM configuration
#'
#' @description
#' Deletes the specified Amazon Redshift HSM configuration.
#'
#' @usage
#' redshift_delete_hsm_configuration(HsmConfigurationIdentifier)
#'
#' @param HsmConfigurationIdentifier [required] The identifier of the Amazon Redshift HSM configuration to be deleted.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_hsm_configuration(
#' HsmConfigurationIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_hsm_configuration
redshift_delete_hsm_configuration <- function(HsmConfigurationIdentifier) {
  # Describe the DeleteHsmConfiguration API operation.
  operation <- new_operation(
    name = "DeleteHsmConfiguration",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  op_input <- .redshift$delete_hsm_configuration_input(
    HsmConfigurationIdentifier = HsmConfigurationIdentifier
  )
  op_output <- .redshift$delete_hsm_configuration_output()
  # Resolve the client configuration, then build and dispatch the request.
  svc <- .redshift$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.redshift$operations$delete_hsm_configuration <- redshift_delete_hsm_configuration
#' Deletes a scheduled action
#'
#' @description
#' Deletes a scheduled action.
#'
#' @usage
#' redshift_delete_scheduled_action(ScheduledActionName)
#'
#' @param ScheduledActionName [required] The name of the scheduled action to delete.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_scheduled_action(
#' ScheduledActionName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_scheduled_action
redshift_delete_scheduled_action <- function(ScheduledActionName) {
  # Describe the DeleteScheduledAction API operation.
  operation <- new_operation(
    name = "DeleteScheduledAction",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  op_input <- .redshift$delete_scheduled_action_input(
    ScheduledActionName = ScheduledActionName
  )
  op_output <- .redshift$delete_scheduled_action_output()
  # Resolve the client configuration, then build and dispatch the request.
  svc <- .redshift$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.redshift$operations$delete_scheduled_action <- redshift_delete_scheduled_action
#' Deletes the specified snapshot copy grant
#'
#' @description
#' Deletes the specified snapshot copy grant.
#'
#' @usage
#' redshift_delete_snapshot_copy_grant(SnapshotCopyGrantName)
#'
#' @param SnapshotCopyGrantName [required] The name of the snapshot copy grant to delete.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_snapshot_copy_grant(
#' SnapshotCopyGrantName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_snapshot_copy_grant
redshift_delete_snapshot_copy_grant <- function(SnapshotCopyGrantName) {
  # Describe the DeleteSnapshotCopyGrant API operation.
  operation <- new_operation(
    name = "DeleteSnapshotCopyGrant",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  op_input <- .redshift$delete_snapshot_copy_grant_input(
    SnapshotCopyGrantName = SnapshotCopyGrantName
  )
  op_output <- .redshift$delete_snapshot_copy_grant_output()
  # Resolve the client configuration, then build and dispatch the request.
  svc <- .redshift$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.redshift$operations$delete_snapshot_copy_grant <- redshift_delete_snapshot_copy_grant
#' Deletes a snapshot schedule
#'
#' @description
#' Deletes a snapshot schedule.
#'
#' @usage
#' redshift_delete_snapshot_schedule(ScheduleIdentifier)
#'
#' @param ScheduleIdentifier [required] A unique identifier of the snapshot schedule to delete.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_snapshot_schedule(
#' ScheduleIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_snapshot_schedule
redshift_delete_snapshot_schedule <- function(ScheduleIdentifier) {
  # Describe the DeleteSnapshotSchedule API operation.
  operation <- new_operation(
    name = "DeleteSnapshotSchedule",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  op_input <- .redshift$delete_snapshot_schedule_input(
    ScheduleIdentifier = ScheduleIdentifier
  )
  op_output <- .redshift$delete_snapshot_schedule_output()
  # Resolve the client configuration, then build and dispatch the request.
  svc <- .redshift$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.redshift$operations$delete_snapshot_schedule <- redshift_delete_snapshot_schedule
#' Deletes tags from a resource
#'
#' @description
#' Deletes tags from a resource. You must provide the ARN of the resource
#' from which you want to delete the tag or tags.
#'
#' @usage
#' redshift_delete_tags(ResourceName, TagKeys)
#'
#' @param ResourceName [required] The Amazon Resource Name (ARN) from which you want to remove the tag or
#' tags. For example, `arn:aws:redshift:us-east-2:123456789:cluster:t1`.
#' @param TagKeys [required] The tag key that you want to delete.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_tags(
#' ResourceName = "string",
#' TagKeys = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_tags
redshift_delete_tags <- function(ResourceName, TagKeys) {
  # Describe the DeleteTags API operation.
  operation <- new_operation(
    name = "DeleteTags",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  op_input <- .redshift$delete_tags_input(
    ResourceName = ResourceName,
    TagKeys = TagKeys
  )
  op_output <- .redshift$delete_tags_output()
  # Resolve the client configuration, then build and dispatch the request.
  svc <- .redshift$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.redshift$operations$delete_tags <- redshift_delete_tags
#' Deletes a usage limit from a cluster
#'
#' @description
#' Deletes a usage limit from a cluster.
#'
#' @usage
#' redshift_delete_usage_limit(UsageLimitId)
#'
#' @param UsageLimitId [required] The identifier of the usage limit to delete.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_usage_limit(
#' UsageLimitId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_delete_usage_limit
redshift_delete_usage_limit <- function(UsageLimitId) {
  # Describe the DeleteUsageLimit API operation.
  operation <- new_operation(
    name = "DeleteUsageLimit",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  op_input <- .redshift$delete_usage_limit_input(
    UsageLimitId = UsageLimitId
  )
  op_output <- .redshift$delete_usage_limit_output()
  # Resolve the client configuration, then build and dispatch the request.
  svc <- .redshift$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.redshift$operations$delete_usage_limit <- redshift_delete_usage_limit
#' Returns a list of attributes attached to an account
#'
#' @description
#' Returns a list of attributes attached to an account
#'
#' @usage
#' redshift_describe_account_attributes(AttributeNames)
#'
#' @param AttributeNames A list of attribute names.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' AccountAttributes = list(
#' list(
#' AttributeName = "string",
#' AttributeValues = list(
#' list(
#' AttributeValue = "string"
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_account_attributes(
#' AttributeNames = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_account_attributes
redshift_describe_account_attributes <- function(AttributeNames = NULL) {
  # Describe the DescribeAccountAttributes API operation.
  operation <- new_operation(
    name = "DescribeAccountAttributes",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  op_input <- .redshift$describe_account_attributes_input(
    AttributeNames = AttributeNames
  )
  op_output <- .redshift$describe_account_attributes_output()
  # Resolve the client configuration, then build and dispatch the request.
  svc <- .redshift$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.redshift$operations$describe_account_attributes <- redshift_describe_account_attributes
#' Returns an array of ClusterDbRevision objects
#'
#' @description
#' Returns an array of `ClusterDbRevision` objects.
#'
#' @usage
#' redshift_describe_cluster_db_revisions(ClusterIdentifier, MaxRecords,
#' Marker)
#'
#' @param ClusterIdentifier A unique identifier for a cluster whose `ClusterDbRevisions` you are
#' requesting. This parameter is case sensitive. All clusters defined for
#' an account are returned by default.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified MaxRecords
#' value, a value is returned in the `marker` field of the response. You
#' can retrieve the next set of response records by providing the returned
#' `marker` value in the `marker` parameter and retrying the request.
#'
#' Default: 100
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point for returning a
#' set of response records. When the results of a
#' [`describe_cluster_db_revisions`][redshift_describe_cluster_db_revisions]
#' request exceed the value specified in `MaxRecords`, Amazon Redshift
#' returns a value in the `marker` field of the response. You can retrieve
#' the next set of response records by providing the returned `marker`
#' value in the `marker` parameter and retrying the request.
#'
#' Constraints: You can specify either the `ClusterIdentifier` parameter,
#' or the `marker` parameter, but not both.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' ClusterDbRevisions = list(
#' list(
#' ClusterIdentifier = "string",
#' CurrentDatabaseRevision = "string",
#' DatabaseRevisionReleaseDate = as.POSIXct(
#' "2015-01-01"
#' ),
#' RevisionTargets = list(
#' list(
#' DatabaseRevision = "string",
#' Description = "string",
#' DatabaseRevisionReleaseDate = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_cluster_db_revisions(
#' ClusterIdentifier = "string",
#' MaxRecords = 123,
#' Marker = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_cluster_db_revisions
redshift_describe_cluster_db_revisions <- function(ClusterIdentifier = NULL, MaxRecords = NULL, Marker = NULL) {
  # Describe the DescribeClusterDbRevisions API operation.
  operation <- new_operation(
    name = "DescribeClusterDbRevisions",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  op_input <- .redshift$describe_cluster_db_revisions_input(
    ClusterIdentifier = ClusterIdentifier,
    MaxRecords = MaxRecords,
    Marker = Marker
  )
  op_output <- .redshift$describe_cluster_db_revisions_output()
  # Resolve the client configuration, then build and dispatch the request.
  svc <- .redshift$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.redshift$operations$describe_cluster_db_revisions <- redshift_describe_cluster_db_revisions
#' Returns a list of Amazon Redshift parameter groups, including parameter
#' groups you created and the default parameter group
#'
#' @description
#' Returns a list of Amazon Redshift parameter groups, including parameter
#' groups you created and the default parameter group. For each parameter
#' group, the response includes the parameter group name, description, and
#' parameter group family name. You can optionally specify a name to
#' retrieve the description of a specific parameter group.
#'
#' For more information about parameters and parameter groups, go to
#' [Amazon Redshift Parameter
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' If you specify both tag keys and tag values in the same request, Amazon
#' Redshift returns all parameter groups that match any combination of the
#' specified keys and values. For example, if you have `owner` and
#' `environment` for tag keys, and `admin` and `test` for tag values, all
#' parameter groups that have any combination of those values are returned.
#'
#' If both tag keys and values are omitted from the request, parameter
#' groups are returned regardless of whether they have tag keys or values
#' associated with them.
#'
#' @usage
#' redshift_describe_cluster_parameter_groups(ParameterGroupName,
#' MaxRecords, Marker, TagKeys, TagValues)
#'
#' @param ParameterGroupName The name of a specific parameter group for which to return details. By
#' default, details about all parameter groups and the default parameter
#' group are returned.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_cluster_parameter_groups`][redshift_describe_cluster_parameter_groups]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#' @param TagKeys A tag key or keys for which you want to return all matching cluster
#' parameter groups that are associated with the specified key or keys. For
#' example, suppose that you have parameter groups that are tagged with
#' keys called `owner` and `environment`. If you specify both of these tag
#' keys in the request, Amazon Redshift returns a response with the
#' parameter groups that have either or both of these tag keys associated
#' with them.
#' @param TagValues A tag value or values for which you want to return all matching cluster
#' parameter groups that are associated with the specified tag value or
#' values. For example, suppose that you have parameter groups that are
#' tagged with values called `admin` and `test`. If you specify both of
#' these tag values in the request, Amazon Redshift returns a response with
#' the parameter groups that have either or both of these tag values
#' associated with them.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' ParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterGroupFamily = "string",
#' Description = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_cluster_parameter_groups(
#' ParameterGroupName = "string",
#' MaxRecords = 123,
#' Marker = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_cluster_parameter_groups
redshift_describe_cluster_parameter_groups <- function(ParameterGroupName = NULL, MaxRecords = NULL, Marker = NULL, TagKeys = NULL, TagValues = NULL) {
  # Describe the DescribeClusterParameterGroups API operation.
  operation <- new_operation(
    name = "DescribeClusterParameterGroups",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  op_input <- .redshift$describe_cluster_parameter_groups_input(
    ParameterGroupName = ParameterGroupName,
    MaxRecords = MaxRecords,
    Marker = Marker,
    TagKeys = TagKeys,
    TagValues = TagValues
  )
  op_output <- .redshift$describe_cluster_parameter_groups_output()
  # Resolve the client configuration, then build and dispatch the request.
  svc <- .redshift$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.redshift$operations$describe_cluster_parameter_groups <- redshift_describe_cluster_parameter_groups
#' Returns a detailed list of parameters contained within the specified
#' Amazon Redshift parameter group
#'
#' @description
#' Returns a detailed list of parameters contained within the specified
#' Amazon Redshift parameter group. For each parameter the response
#' includes information such as parameter name, description, data type,
#' value, whether the parameter value is modifiable, and so on.
#'
#' You can specify *source* filter to retrieve parameters of only specific
#' type. For example, to retrieve parameters that were modified by a user
#' action such as from
#' [`modify_cluster_parameter_group`][redshift_modify_cluster_parameter_group],
#' you can specify *source* equal to *user*.
#'
#' For more information about parameters and parameter groups, go to
#' [Amazon Redshift Parameter
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_describe_cluster_parameters(ParameterGroupName, Source,
#' MaxRecords, Marker)
#'
#' @param ParameterGroupName [required] The name of a cluster parameter group for which to return details.
#' @param Source The parameter types to return. Specify `user` to show parameters that
#' are different from the default. Similarly, specify `engine-default` to
#' show parameters that are the same as the default parameter group.
#'
#' Default: All parameter types returned.
#'
#' Valid Values: `user` | `engine-default`
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_cluster_parameters`][redshift_describe_cluster_parameters]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Parameters = list(
#' list(
#' ParameterName = "string",
#' ParameterValue = "string",
#' Description = "string",
#' Source = "string",
#' DataType = "string",
#' AllowedValues = "string",
#' ApplyType = "static"|"dynamic",
#' IsModifiable = TRUE|FALSE,
#' MinimumEngineVersion = "string"
#' )
#' ),
#' Marker = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_cluster_parameters(
#' ParameterGroupName = "string",
#' Source = "string",
#' MaxRecords = 123,
#' Marker = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_cluster_parameters
redshift_describe_cluster_parameters <- function(ParameterGroupName, Source = NULL, MaxRecords = NULL, Marker = NULL) {
  # Describe the DescribeClusterParameters API operation.
  operation <- new_operation(
    name = "DescribeClusterParameters",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  op_input <- .redshift$describe_cluster_parameters_input(
    ParameterGroupName = ParameterGroupName,
    Source = Source,
    MaxRecords = MaxRecords,
    Marker = Marker
  )
  op_output <- .redshift$describe_cluster_parameters_output()
  # Resolve the client configuration, then build and dispatch the request.
  svc <- .redshift$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.redshift$operations$describe_cluster_parameters <- redshift_describe_cluster_parameters
#' Returns information about Amazon Redshift security groups
#'
#' @description
#' Returns information about Amazon Redshift security groups. If the name
#' of a security group is specified, the response will contain
#' information about only that security group.
#'
#' For information about managing security groups, go to [Amazon Redshift
#' Cluster Security
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' If you specify both tag keys and tag values in the same request, Amazon
#' Redshift returns all security groups that match any combination of the
#' specified keys and values. For example, if you have `owner` and
#' `environment` for tag keys, and `admin` and `test` for tag values, all
#' security groups that have any combination of those values are returned.
#'
#' If both tag keys and values are omitted from the request, security
#' groups are returned regardless of whether they have tag keys or values
#' associated with them.
#'
#' @usage
#' redshift_describe_cluster_security_groups(ClusterSecurityGroupName,
#' MaxRecords, Marker, TagKeys, TagValues)
#'
#' @param ClusterSecurityGroupName The name of a cluster security group for which you are requesting
#' details. You can specify either the **Marker** parameter or a
#' **ClusterSecurityGroupName** parameter, but not both.
#'
#' Example: `securitygroup1`
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_cluster_security_groups`][redshift_describe_cluster_security_groups]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#'
#' Constraints: You can specify either the **ClusterSecurityGroupName**
#' parameter or the **Marker** parameter, but not both.
#' @param TagKeys A tag key or keys for which you want to return all matching cluster
#' security groups that are associated with the specified key or keys. For
#' example, suppose that you have security groups that are tagged with keys
#' called `owner` and `environment`. If you specify both of these tag keys
#' in the request, Amazon Redshift returns a response with the security
#' groups that have either or both of these tag keys associated with them.
#' @param TagValues A tag value or values for which you want to return all matching cluster
#' security groups that are associated with the specified tag value or
#' values. For example, suppose that you have security groups that are
#' tagged with values called `admin` and `test`. If you specify both of
#' these tag values in the request, Amazon Redshift returns a response with
#' the security groups that have either or both of these tag values
#' associated with them.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Description = "string",
#' EC2SecurityGroups = list(
#' list(
#' Status = "string",
#' EC2SecurityGroupName = "string",
#' EC2SecurityGroupOwnerId = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ),
#' IPRanges = list(
#' list(
#' Status = "string",
#' CIDRIP = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ),
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_cluster_security_groups(
#' ClusterSecurityGroupName = "string",
#' MaxRecords = 123,
#' Marker = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_cluster_security_groups
redshift_describe_cluster_security_groups <- function(ClusterSecurityGroupName = NULL, MaxRecords = NULL, Marker = NULL, TagKeys = NULL, TagValues = NULL) {
  # Describe the DescribeClusterSecurityGroups API operation.
  operation <- new_operation(
    name = "DescribeClusterSecurityGroups",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  op_input <- .redshift$describe_cluster_security_groups_input(
    ClusterSecurityGroupName = ClusterSecurityGroupName,
    MaxRecords = MaxRecords,
    Marker = Marker,
    TagKeys = TagKeys,
    TagValues = TagValues
  )
  op_output <- .redshift$describe_cluster_security_groups_output()
  # Resolve the client configuration, then build and dispatch the request.
  svc <- .redshift$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.redshift$operations$describe_cluster_security_groups <- redshift_describe_cluster_security_groups
#' Returns one or more snapshot objects, which contain metadata about your
#' cluster snapshots
#'
#' @description
#' Returns one or more snapshot objects, which contain metadata about your
#' cluster snapshots. By default, this operation returns information about
#' all snapshots of all clusters that are owned by your AWS customer
#' account. No information is returned for snapshots owned by inactive AWS
#' customer accounts.
#'
#' If you specify both tag keys and tag values in the same request, Amazon
#' Redshift returns all snapshots that match any combination of the
#' specified keys and values. For example, if you have `owner` and
#' `environment` for tag keys, and `admin` and `test` for tag values, all
#' snapshots that have any combination of those values are returned. Only
#' snapshots that you own are returned in the response; shared snapshots
#' are not returned with the tag key and tag value request parameters.
#'
#' If both tag keys and values are omitted from the request, snapshots are
#' returned regardless of whether they have tag keys or values associated
#' with them.
#'
#' @usage
#' redshift_describe_cluster_snapshots(ClusterIdentifier,
#' SnapshotIdentifier, SnapshotType, StartTime, EndTime, MaxRecords,
#' Marker, OwnerAccount, TagKeys, TagValues, ClusterExists,
#' SortingEntities)
#'
#' @param ClusterIdentifier The identifier of the cluster which generated the requested snapshots.
#' @param SnapshotIdentifier The snapshot identifier of the snapshot about which to return
#' information.
#' @param SnapshotType The type of snapshots for which you are requesting information. By
#' default, snapshots of all types are returned.
#'
#' Valid Values: `automated` | `manual`
#' @param StartTime A value that requests only snapshots created at or after the specified
#' time. The time value is specified in ISO 8601 format. For more
#' information about ISO 8601, go to the [ISO8601 Wikipedia
#' page.](https://en.wikipedia.org/wiki/ISO_8601)
#'
#' Example: `2012-07-16T18:00:00Z`
#' @param EndTime A time value that requests only snapshots created at or before the
#' specified time. The time value is specified in ISO 8601 format. For more
#' information about ISO 8601, go to the [ISO8601 Wikipedia
#' page.](https://en.wikipedia.org/wiki/ISO_8601)
#'
#' Example: `2012-07-16T18:00:00Z`
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_cluster_snapshots`][redshift_describe_cluster_snapshots]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#' @param OwnerAccount The AWS customer account used to create or copy the snapshot. Use this
#' field to filter the results to snapshots owned by a particular account.
#' To describe snapshots you own, either specify your AWS customer account,
#' or do not specify the parameter.
#' @param TagKeys A tag key or keys for which you want to return all matching cluster
#' snapshots that are associated with the specified key or keys. For
#' example, suppose that you have snapshots that are tagged with keys
#' called `owner` and `environment`. If you specify both of these tag keys
#' in the request, Amazon Redshift returns a response with the snapshots
#' that have either or both of these tag keys associated with them.
#' @param TagValues A tag value or values for which you want to return all matching cluster
#' snapshots that are associated with the specified tag value or values.
#' For example, suppose that you have snapshots that are tagged with values
#' called `admin` and `test`. If you specify both of these tag values in
#' the request, Amazon Redshift returns a response with the snapshots that
#' have either or both of these tag values associated with them.
#' @param ClusterExists A value that indicates whether to return snapshots only for an existing
#' cluster. You can perform table-level restore only by using a snapshot of
#' an existing cluster, that is, a cluster that has not been deleted.
#' Values for this parameter work as follows:
#'
#' - If `ClusterExists` is set to `true`, `ClusterIdentifier` is
#' required.
#'
#' - If `ClusterExists` is set to `false` and `ClusterIdentifier` isn't
#' specified, all snapshots associated with deleted clusters (orphaned
#' snapshots) are returned.
#'
#' - If `ClusterExists` is set to `false` and `ClusterIdentifier` is
#' specified for a deleted cluster, snapshots associated with that
#' cluster are returned.
#'
#' - If `ClusterExists` is set to `false` and `ClusterIdentifier` is
#' specified for an existing cluster, no snapshots are returned.
#' @param SortingEntities A list of sorting entities that determine the order of the returned
#' snapshots. Each entity specifies an `Attribute` (`SOURCE_TYPE`,
#' `TOTAL_SIZE`, or `CREATE_TIME`) and an optional `SortOrder` (`ASC` or
#' `DESC`).
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' Snapshots = list(
#' list(
#' SnapshotIdentifier = "string",
#' ClusterIdentifier = "string",
#' SnapshotCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Status = "string",
#' Port = 123,
#' AvailabilityZone = "string",
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' MasterUsername = "string",
#' ClusterVersion = "string",
#' EngineFullVersion = "string",
#' SnapshotType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' DBName = "string",
#' VpcId = "string",
#' Encrypted = TRUE|FALSE,
#' KmsKeyId = "string",
#' EncryptedWithHSM = TRUE|FALSE,
#' AccountsWithRestoreAccess = list(
#' list(
#' AccountId = "string",
#' AccountAlias = "string"
#' )
#' ),
#' OwnerAccount = "string",
#' TotalBackupSizeInMegaBytes = 123.0,
#' ActualIncrementalBackupSizeInMegaBytes = 123.0,
#' BackupProgressInMegaBytes = 123.0,
#' CurrentBackupRateInMegaBytesPerSecond = 123.0,
#' EstimatedSecondsToCompletion = 123,
#' ElapsedTimeInSeconds = 123,
#' SourceRegion = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' RestorableNodeTypes = list(
#' "string"
#' ),
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' ManualSnapshotRetentionPeriod = 123,
#' ManualSnapshotRemainingDays = 123,
#' SnapshotRetentionStartTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_cluster_snapshots(
#' ClusterIdentifier = "string",
#' SnapshotIdentifier = "string",
#' SnapshotType = "string",
#' StartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' EndTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' MaxRecords = 123,
#' Marker = "string",
#' OwnerAccount = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' ),
#' ClusterExists = TRUE|FALSE,
#' SortingEntities = list(
#' list(
#' Attribute = "SOURCE_TYPE"|"TOTAL_SIZE"|"CREATE_TIME",
#' SortOrder = "ASC"|"DESC"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_cluster_snapshots
redshift_describe_cluster_snapshots <- function(ClusterIdentifier = NULL, SnapshotIdentifier = NULL, SnapshotType = NULL, StartTime = NULL, EndTime = NULL, MaxRecords = NULL, Marker = NULL, OwnerAccount = NULL, TagKeys = NULL, TagValues = NULL, ClusterExists = NULL, SortingEntities = NULL) {
  # Operation metadata for the DescribeClusterSnapshots API action.
  operation <- new_operation(
    name = "DescribeClusterSnapshots",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize all filter parameters into the request shape.
  req_params <- .redshift$describe_cluster_snapshots_input(
    ClusterIdentifier = ClusterIdentifier,
    SnapshotIdentifier = SnapshotIdentifier,
    SnapshotType = SnapshotType,
    StartTime = StartTime,
    EndTime = EndTime,
    MaxRecords = MaxRecords,
    Marker = Marker,
    OwnerAccount = OwnerAccount,
    TagKeys = TagKeys,
    TagValues = TagValues,
    ClusterExists = ClusterExists,
    SortingEntities = SortingEntities
  )
  resp_shape <- .redshift$describe_cluster_snapshots_output()
  # Create a client from the current configuration, then send the request.
  client <- .redshift$service(get_config())
  response <- send_request(new_request(client, operation, req_params, resp_shape))
  response
}
.redshift$operations$describe_cluster_snapshots <- redshift_describe_cluster_snapshots
#' Returns one or more cluster subnet group objects, which contain metadata
#' about your cluster subnet groups
#'
#' @description
#' Returns one or more cluster subnet group objects, which contain metadata
#' about your cluster subnet groups. By default, this operation returns
#' information about all cluster subnet groups that are defined in your AWS
#' account.
#'
#' If you specify both tag keys and tag values in the same request, Amazon
#' Redshift returns all subnet groups that match any combination of the
#' specified keys and values. For example, if you have `owner` and
#' `environment` for tag keys, and `admin` and `test` for tag values, all
#' subnet groups that have any combination of those values are returned.
#'
#' If both tag keys and values are omitted from the request, subnet groups
#' are returned regardless of whether they have tag keys or values
#' associated with them.
#'
#' @usage
#' redshift_describe_cluster_subnet_groups(ClusterSubnetGroupName,
#' MaxRecords, Marker, TagKeys, TagValues)
#'
#' @param ClusterSubnetGroupName The name of the cluster subnet group for which information is requested.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_cluster_subnet_groups`][redshift_describe_cluster_subnet_groups]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#' @param TagKeys A tag key or keys for which you want to return all matching cluster
#' subnet groups that are associated with the specified key or keys. For
#' example, suppose that you have subnet groups that are tagged with keys
#' called `owner` and `environment`. If you specify both of these tag keys
#' in the request, Amazon Redshift returns a response with the subnet
#' groups that have either or both of these tag keys associated with them.
#' @param TagValues A tag value or values for which you want to return all matching cluster
#' subnet groups that are associated with the specified tag value or
#' values. For example, suppose that you have subnet groups that are tagged
#' with values called `admin` and `test`. If you specify both of these tag
#' values in the request, Amazon Redshift returns a response with the
#' subnet groups that have either or both of these tag values associated
#' with them.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' ClusterSubnetGroups = list(
#' list(
#' ClusterSubnetGroupName = "string",
#' Description = "string",
#' VpcId = "string",
#' SubnetGroupStatus = "string",
#' Subnets = list(
#' list(
#' SubnetIdentifier = "string",
#' SubnetAvailabilityZone = list(
#' Name = "string",
#' SupportedPlatforms = list(
#' list(
#' Name = "string"
#' )
#' )
#' ),
#' SubnetStatus = "string"
#' )
#' ),
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_cluster_subnet_groups(
#' ClusterSubnetGroupName = "string",
#' MaxRecords = 123,
#' Marker = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_cluster_subnet_groups
redshift_describe_cluster_subnet_groups <- function(ClusterSubnetGroupName = NULL, MaxRecords = NULL, Marker = NULL, TagKeys = NULL, TagValues = NULL) {
  # Operation metadata for the DescribeClusterSubnetGroups API action.
  operation <- new_operation(
    name = "DescribeClusterSubnetGroups",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal arguments into the request shape; obtain the response template.
  req_params <- .redshift$describe_cluster_subnet_groups_input(
    ClusterSubnetGroupName = ClusterSubnetGroupName,
    MaxRecords = MaxRecords,
    Marker = Marker,
    TagKeys = TagKeys,
    TagValues = TagValues
  )
  resp_shape <- .redshift$describe_cluster_subnet_groups_output()
  # Build the client from the active configuration and dispatch the call.
  client <- .redshift$service(get_config())
  response <- send_request(new_request(client, operation, req_params, resp_shape))
  response
}
.redshift$operations$describe_cluster_subnet_groups <- redshift_describe_cluster_subnet_groups
#' Returns a list of all the available maintenance tracks
#'
#' @description
#' Returns a list of all the available maintenance tracks.
#'
#' @usage
#' redshift_describe_cluster_tracks(MaintenanceTrackName, MaxRecords,
#' Marker)
#'
#' @param MaintenanceTrackName The name of the maintenance track.
#' @param MaxRecords An integer value for the maximum number of maintenance tracks to return.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_cluster_tracks`][redshift_describe_cluster_tracks] request
#' exceed the value specified in `MaxRecords`, Amazon Redshift returns a
#' value in the `Marker` field of the response. You can retrieve the next
#' set of response records by providing the returned marker value in the
#' `Marker` parameter and retrying the request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' MaintenanceTracks = list(
#' list(
#' MaintenanceTrackName = "string",
#' DatabaseVersion = "string",
#' UpdateTargets = list(
#' list(
#' MaintenanceTrackName = "string",
#' DatabaseVersion = "string",
#' SupportedOperations = list(
#' list(
#' OperationName = "string"
#' )
#' )
#' )
#' )
#' )
#' ),
#' Marker = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_cluster_tracks(
#' MaintenanceTrackName = "string",
#' MaxRecords = 123,
#' Marker = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_cluster_tracks
redshift_describe_cluster_tracks <- function(MaintenanceTrackName = NULL, MaxRecords = NULL, Marker = NULL) {
  # Operation metadata for the DescribeClusterTracks API action.
  operation <- new_operation(
    name = "DescribeClusterTracks",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal arguments into the request shape; obtain the response template.
  req_params <- .redshift$describe_cluster_tracks_input(
    MaintenanceTrackName = MaintenanceTrackName,
    MaxRecords = MaxRecords,
    Marker = Marker
  )
  resp_shape <- .redshift$describe_cluster_tracks_output()
  # Build the client from the active configuration and dispatch the call.
  client <- .redshift$service(get_config())
  response <- send_request(new_request(client, operation, req_params, resp_shape))
  response
}
.redshift$operations$describe_cluster_tracks <- redshift_describe_cluster_tracks
#' Returns descriptions of the available Amazon Redshift cluster versions
#'
#' @description
#' Returns descriptions of the available Amazon Redshift cluster versions.
#' You can call this operation even before creating any clusters to learn
#' more about the Amazon Redshift versions. For more information about
#' managing clusters, go to [Amazon Redshift
#' Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_describe_cluster_versions(ClusterVersion,
#' ClusterParameterGroupFamily, MaxRecords, Marker)
#'
#' @param ClusterVersion The specific cluster version to return.
#'
#' Example: `1.0`
#' @param ClusterParameterGroupFamily The name of a specific cluster parameter group family to return details
#' for.
#'
#' Constraints:
#'
#' - Must be 1 to 255 alphanumeric characters
#'
#' - First character must be a letter
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_cluster_versions`][redshift_describe_cluster_versions]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' ClusterVersions = list(
#' list(
#' ClusterVersion = "string",
#' ClusterParameterGroupFamily = "string",
#' Description = "string"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_cluster_versions(
#' ClusterVersion = "string",
#' ClusterParameterGroupFamily = "string",
#' MaxRecords = 123,
#' Marker = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_cluster_versions
redshift_describe_cluster_versions <- function(ClusterVersion = NULL, ClusterParameterGroupFamily = NULL, MaxRecords = NULL, Marker = NULL) {
  # Operation metadata for the DescribeClusterVersions API action.
  operation <- new_operation(
    name = "DescribeClusterVersions",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal arguments into the request shape; obtain the response template.
  req_params <- .redshift$describe_cluster_versions_input(
    ClusterVersion = ClusterVersion,
    ClusterParameterGroupFamily = ClusterParameterGroupFamily,
    MaxRecords = MaxRecords,
    Marker = Marker
  )
  resp_shape <- .redshift$describe_cluster_versions_output()
  # Build the client from the active configuration and dispatch the call.
  client <- .redshift$service(get_config())
  response <- send_request(new_request(client, operation, req_params, resp_shape))
  response
}
.redshift$operations$describe_cluster_versions <- redshift_describe_cluster_versions
#' Returns properties of provisioned clusters including general cluster
#' properties, cluster database properties, maintenance and backup
#' properties, and security and access properties
#'
#' @description
#' Returns properties of provisioned clusters including general cluster
#' properties, cluster database properties, maintenance and backup
#' properties, and security and access properties. This operation supports
#' pagination. For more information about managing clusters, go to [Amazon
#' Redshift
#' Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' If you specify both tag keys and tag values in the same request, Amazon
#' Redshift returns all clusters that match any combination of the
#' specified keys and values. For example, if you have `owner` and
#' `environment` for tag keys, and `admin` and `test` for tag values, all
#' clusters that have any combination of those values are returned.
#'
#' If both tag keys and values are omitted from the request, clusters are
#' returned regardless of whether they have tag keys or values associated
#' with them.
#'
#' @usage
#' redshift_describe_clusters(ClusterIdentifier, MaxRecords, Marker,
#' TagKeys, TagValues)
#'
#' @param ClusterIdentifier The unique identifier of a cluster whose properties you are requesting.
#' This parameter is case sensitive.
#'
#' The default is that all clusters defined for an account are returned.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_clusters`][redshift_describe_clusters] request exceed the
#' value specified in `MaxRecords`, AWS returns a value in the `Marker`
#' field of the response. You can retrieve the next set of response records
#' by providing the returned marker value in the `Marker` parameter and
#' retrying the request.
#'
#' Constraints: You can specify either the **ClusterIdentifier** parameter
#' or the **Marker** parameter, but not both.
#' @param TagKeys A tag key or keys for which you want to return all matching clusters
#' that are associated with the specified key or keys. For example, suppose
#' that you have clusters that are tagged with keys called `owner` and
#' `environment`. If you specify both of these tag keys in the request,
#' Amazon Redshift returns a response with the clusters that have either or
#' both of these tag keys associated with them.
#' @param TagValues A tag value or values for which you want to return all matching clusters
#' that are associated with the specified tag value or values. For example,
#' suppose that you have clusters that are tagged with values called
#' `admin` and `test`. If you specify both of these tag values in the
#' request, Amazon Redshift returns a response with the clusters that have
#' either or both of these tag values associated with them.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' Clusters = list(
#' list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_clusters(
#' ClusterIdentifier = "string",
#' MaxRecords = 123,
#' Marker = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_clusters
redshift_describe_clusters <- function(ClusterIdentifier = NULL, MaxRecords = NULL, Marker = NULL, TagKeys = NULL, TagValues = NULL) {
  # Operation metadata for the DescribeClusters API action.
  operation <- new_operation(
    name = "DescribeClusters",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal arguments into the request shape; obtain the response template.
  req_params <- .redshift$describe_clusters_input(
    ClusterIdentifier = ClusterIdentifier,
    MaxRecords = MaxRecords,
    Marker = Marker,
    TagKeys = TagKeys,
    TagValues = TagValues
  )
  resp_shape <- .redshift$describe_clusters_output()
  # Build the client from the active configuration and dispatch the call.
  client <- .redshift$service(get_config())
  response <- send_request(new_request(client, operation, req_params, resp_shape))
  response
}
.redshift$operations$describe_clusters <- redshift_describe_clusters
#' Returns a list of parameter settings for the specified parameter group
#' family
#'
#' @description
#' Returns a list of parameter settings for the specified parameter group
#' family.
#'
#' For more information about parameters and parameter groups, go to
#' [Amazon Redshift Parameter
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_describe_default_cluster_parameters(ParameterGroupFamily,
#' MaxRecords, Marker)
#'
#' @param ParameterGroupFamily [required] The name of the cluster parameter group family.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_default_cluster_parameters`][redshift_describe_default_cluster_parameters]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' DefaultClusterParameters = list(
#' ParameterGroupFamily = "string",
#' Marker = "string",
#' Parameters = list(
#' list(
#' ParameterName = "string",
#' ParameterValue = "string",
#' Description = "string",
#' Source = "string",
#' DataType = "string",
#' AllowedValues = "string",
#' ApplyType = "static"|"dynamic",
#' IsModifiable = TRUE|FALSE,
#' MinimumEngineVersion = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_default_cluster_parameters(
#' ParameterGroupFamily = "string",
#' MaxRecords = 123,
#' Marker = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_default_cluster_parameters
redshift_describe_default_cluster_parameters <- function(ParameterGroupFamily, MaxRecords = NULL, Marker = NULL) {
  # Operation metadata for the DescribeDefaultClusterParameters API action.
  # Note: ParameterGroupFamily is required (no default) per the API contract.
  operation <- new_operation(
    name = "DescribeDefaultClusterParameters",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal arguments into the request shape; obtain the response template.
  req_params <- .redshift$describe_default_cluster_parameters_input(
    ParameterGroupFamily = ParameterGroupFamily,
    MaxRecords = MaxRecords,
    Marker = Marker
  )
  resp_shape <- .redshift$describe_default_cluster_parameters_output()
  # Build the client from the active configuration and dispatch the call.
  client <- .redshift$service(get_config())
  response <- send_request(new_request(client, operation, req_params, resp_shape))
  response
}
.redshift$operations$describe_default_cluster_parameters <- redshift_describe_default_cluster_parameters
#' Displays a list of event categories for all event source types, or for a
#' specified source type
#'
#' @description
#' Displays a list of event categories for all event source types, or for a
#' specified source type. For a list of the event categories and source
#' types, go to [Amazon Redshift Event
#' Notifications](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-event-notifications.html).
#'
#' @usage
#' redshift_describe_event_categories(SourceType)
#'
#' @param SourceType The source type, such as cluster or parameter group, to which the
#' described event categories apply.
#'
#' Valid values: cluster, cluster-snapshot, cluster-parameter-group,
#' cluster-security-group, and scheduled-action.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' EventCategoriesMapList = list(
#' list(
#' SourceType = "string",
#' Events = list(
#' list(
#' EventId = "string",
#' EventCategories = list(
#' "string"
#' ),
#' EventDescription = "string",
#' Severity = "string"
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_event_categories(
#' SourceType = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_event_categories
redshift_describe_event_categories <- function(SourceType = NULL) {
  # Operation metadata for the DescribeEventCategories API action.
  operation <- new_operation(
    name = "DescribeEventCategories",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the single optional filter into the request shape.
  req_params <- .redshift$describe_event_categories_input(SourceType = SourceType)
  resp_shape <- .redshift$describe_event_categories_output()
  # Build the client from the active configuration and dispatch the call.
  client <- .redshift$service(get_config())
  response <- send_request(new_request(client, operation, req_params, resp_shape))
  response
}
.redshift$operations$describe_event_categories <- redshift_describe_event_categories
#' Lists descriptions of all the Amazon Redshift event notification
#' subscriptions for a customer account
#'
#' @description
#' Lists descriptions of all the Amazon Redshift event notification
#' subscriptions for a customer account. If you specify a subscription
#' name, lists the description for that subscription.
#'
#' If you specify both tag keys and tag values in the same request, Amazon
#' Redshift returns all event notification subscriptions that match any
#' combination of the specified keys and values. For example, if you have
#' `owner` and `environment` for tag keys, and `admin` and `test` for tag
#' values, all subscriptions that have any combination of those values are
#' returned.
#'
#' If both tag keys and values are omitted from the request, subscriptions
#' are returned regardless of whether they have tag keys or values
#' associated with them.
#'
#' @usage
#' redshift_describe_event_subscriptions(SubscriptionName, MaxRecords,
#' Marker, TagKeys, TagValues)
#'
#' @param SubscriptionName The name of the Amazon Redshift event notification subscription to be
#' described.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a DescribeEventSubscriptions
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#' @param TagKeys A tag key or keys for which you want to return all matching event
#' notification subscriptions that are associated with the specified key or
#' keys. For example, suppose that you have subscriptions that are tagged
#' with keys called `owner` and `environment`. If you specify both of these
#' tag keys in the request, Amazon Redshift returns a response with the
#' subscriptions that have either or both of these tag keys associated with
#' them.
#' @param TagValues A tag value or values for which you want to return all matching event
#' notification subscriptions that are associated with the specified tag
#' value or values. For example, suppose that you have subscriptions that
#' are tagged with values called `admin` and `test`. If you specify both of
#' these tag values in the request, Amazon Redshift returns a response with
#' the subscriptions that have either or both of these tag values
#' associated with them.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' EventSubscriptionsList = list(
#' list(
#' CustomerAwsId = "string",
#' CustSubscriptionId = "string",
#' SnsTopicArn = "string",
#' Status = "string",
#' SubscriptionCreationTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' SourceType = "string",
#' SourceIdsList = list(
#' "string"
#' ),
#' EventCategoriesList = list(
#' "string"
#' ),
#' Severity = "string",
#' Enabled = TRUE|FALSE,
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_event_subscriptions(
#' SubscriptionName = "string",
#' MaxRecords = 123,
#' Marker = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_event_subscriptions
redshift_describe_event_subscriptions <- function(SubscriptionName = NULL, MaxRecords = NULL, Marker = NULL, TagKeys = NULL, TagValues = NULL) {
  # Describe the DescribeEventSubscriptions API operation.
  operation <- new_operation(
    name = "DescribeEventSubscriptions",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller-supplied parameters into the request shape.
  req_shape <- .redshift$describe_event_subscriptions_input(SubscriptionName = SubscriptionName, MaxRecords = MaxRecords, Marker = Marker, TagKeys = TagKeys, TagValues = TagValues)
  resp_shape <- .redshift$describe_event_subscriptions_output()
  # Build a service client from the current configuration, then execute.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, req_shape, resp_shape))
}
.redshift$operations$describe_event_subscriptions <- redshift_describe_event_subscriptions
#' Returns events related to clusters, security groups, snapshots, and
#' parameter groups for the past 14 days
#'
#' @description
#' Returns events related to clusters, security groups, snapshots, and
#' parameter groups for the past 14 days. Events specific to a particular
#' cluster, security group, snapshot or parameter group can be obtained by
#' providing the name as a parameter. By default, the past hour of events
#' are returned.
#'
#' @usage
#' redshift_describe_events(SourceIdentifier, SourceType, StartTime,
#' EndTime, Duration, MaxRecords, Marker)
#'
#' @param SourceIdentifier The identifier of the event source for which events will be returned. If
#' this parameter is not specified, then all sources are included in the
#' response.
#'
#' Constraints:
#'
#' If *SourceIdentifier* is supplied, *SourceType* must also be provided.
#'
#' - Specify a cluster identifier when *SourceType* is `cluster`.
#'
#' - Specify a cluster security group name when *SourceType* is
#' `cluster-security-group`.
#'
#' - Specify a cluster parameter group name when *SourceType* is
#' `cluster-parameter-group`.
#'
#' - Specify a cluster snapshot identifier when *SourceType* is
#' `cluster-snapshot`.
#' @param SourceType The event source to retrieve events for. If no value is specified, all
#' events are returned.
#'
#' Constraints:
#'
#' If *SourceType* is supplied, *SourceIdentifier* must also be provided.
#'
#' - Specify `cluster` when *SourceIdentifier* is a cluster identifier.
#'
#' - Specify `cluster-security-group` when *SourceIdentifier* is a
#' cluster security group name.
#'
#' - Specify `cluster-parameter-group` when *SourceIdentifier* is a
#' cluster parameter group name.
#'
#' - Specify `cluster-snapshot` when *SourceIdentifier* is a cluster
#' snapshot identifier.
#' @param StartTime The beginning of the time interval to retrieve events for, specified in
#' ISO 8601 format. For more information about ISO 8601, go to the [ISO8601
#' Wikipedia page.](https://en.wikipedia.org/wiki/ISO_8601)
#'
#' Example: `2009-07-08T18:00Z`
#' @param EndTime The end of the time interval for which to retrieve events, specified in
#' ISO 8601 format. For more information about ISO 8601, go to the [ISO8601
#' Wikipedia page.](https://en.wikipedia.org/wiki/ISO_8601)
#'
#' Example: `2009-07-08T18:00Z`
#' @param Duration The number of minutes prior to the time of the request for which to
#' retrieve events. For example, if the request is sent at 18:00 and you
#' specify a duration of 60, then only events which have occurred after
#' 17:00 will be returned.
#'
#' Default: `60`
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_events`][redshift_describe_events] request exceed the value
#' specified in `MaxRecords`, AWS returns a value in the `Marker` field of
#' the response. You can retrieve the next set of response records by
#' providing the returned marker value in the `Marker` parameter and
#' retrying the request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' Events = list(
#' list(
#' SourceIdentifier = "string",
#' SourceType = "cluster"|"cluster-parameter-group"|"cluster-security-group"|"cluster-snapshot"|"scheduled-action",
#' Message = "string",
#' EventCategories = list(
#' "string"
#' ),
#' Severity = "string",
#' Date = as.POSIXct(
#' "2015-01-01"
#' ),
#' EventId = "string"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_events(
#' SourceIdentifier = "string",
#' SourceType = "cluster"|"cluster-parameter-group"|"cluster-security-group"|"cluster-snapshot"|"scheduled-action",
#' StartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' EndTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Duration = 123,
#' MaxRecords = 123,
#' Marker = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_events
redshift_describe_events <- function(SourceIdentifier = NULL, SourceType = NULL, StartTime = NULL, EndTime = NULL, Duration = NULL, MaxRecords = NULL, Marker = NULL) {
  # Describe the DescribeEvents API operation.
  operation <- new_operation(
    name = "DescribeEvents",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller-supplied parameters into the request shape.
  req_shape <- .redshift$describe_events_input(SourceIdentifier = SourceIdentifier, SourceType = SourceType, StartTime = StartTime, EndTime = EndTime, Duration = Duration, MaxRecords = MaxRecords, Marker = Marker)
  resp_shape <- .redshift$describe_events_output()
  # Build a service client from the current configuration, then execute.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, req_shape, resp_shape))
}
.redshift$operations$describe_events <- redshift_describe_events
#' Returns information about the specified HSM client certificate
#'
#' @description
#' Returns information about the specified HSM client certificate. If no
#' certificate ID is specified, returns information about all the HSM
#' certificates owned by your AWS customer account.
#'
#' If you specify both tag keys and tag values in the same request, Amazon
#' Redshift returns all HSM client certificates that match any combination
#' of the specified keys and values. For example, if you have `owner` and
#' `environment` for tag keys, and `admin` and `test` for tag values, all
#' HSM client certificates that have any combination of those values are
#' returned.
#'
#' If both tag keys and values are omitted from the request, HSM client
#' certificates are returned regardless of whether they have tag keys or
#' values associated with them.
#'
#' @usage
#' redshift_describe_hsm_client_certificates(
#' HsmClientCertificateIdentifier, MaxRecords, Marker, TagKeys, TagValues)
#'
#' @param HsmClientCertificateIdentifier The identifier of a specific HSM client certificate for which you want
#' information. If no identifier is specified, information is returned for
#' all HSM client certificates owned by your AWS customer account.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_hsm_client_certificates`][redshift_describe_hsm_client_certificates]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#' @param TagKeys A tag key or keys for which you want to return all matching HSM client
#' certificates that are associated with the specified key or keys. For
#' example, suppose that you have HSM client certificates that are tagged
#' with keys called `owner` and `environment`. If you specify both of these
#' tag keys in the request, Amazon Redshift returns a response with the HSM
#' client certificates that have either or both of these tag keys
#' associated with them.
#' @param TagValues A tag value or values for which you want to return all matching HSM
#' client certificates that are associated with the specified tag value or
#' values. For example, suppose that you have HSM client certificates that
#' are tagged with values called `admin` and `test`. If you specify both of
#' these tag values in the request, Amazon Redshift returns a response with
#' the HSM client certificates that have either or both of these tag values
#' associated with them.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' HsmClientCertificates = list(
#' list(
#' HsmClientCertificateIdentifier = "string",
#' HsmClientCertificatePublicKey = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_hsm_client_certificates(
#' HsmClientCertificateIdentifier = "string",
#' MaxRecords = 123,
#' Marker = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_hsm_client_certificates
redshift_describe_hsm_client_certificates <- function(HsmClientCertificateIdentifier = NULL, MaxRecords = NULL, Marker = NULL, TagKeys = NULL, TagValues = NULL) {
  # Describe the DescribeHsmClientCertificates API operation.
  operation <- new_operation(
    name = "DescribeHsmClientCertificates",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller-supplied parameters into the request shape.
  req_shape <- .redshift$describe_hsm_client_certificates_input(HsmClientCertificateIdentifier = HsmClientCertificateIdentifier, MaxRecords = MaxRecords, Marker = Marker, TagKeys = TagKeys, TagValues = TagValues)
  resp_shape <- .redshift$describe_hsm_client_certificates_output()
  # Build a service client from the current configuration, then execute.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, req_shape, resp_shape))
}
.redshift$operations$describe_hsm_client_certificates <- redshift_describe_hsm_client_certificates
#' Returns information about the specified Amazon Redshift HSM
#' configuration
#'
#' @description
#' Returns information about the specified Amazon Redshift HSM
#' configuration. If no configuration ID is specified, returns information
#' about all the HSM configurations owned by your AWS customer account.
#'
#' If you specify both tag keys and tag values in the same request, Amazon
#' Redshift returns all HSM connections that match any combination of the
#' specified keys and values. For example, if you have `owner` and
#' `environment` for tag keys, and `admin` and `test` for tag values, all
#' HSM connections that have any combination of those values are returned.
#'
#' If both tag keys and values are omitted from the request, HSM
#' connections are returned regardless of whether they have tag keys or
#' values associated with them.
#'
#' @usage
#' redshift_describe_hsm_configurations(HsmConfigurationIdentifier,
#' MaxRecords, Marker, TagKeys, TagValues)
#'
#' @param HsmConfigurationIdentifier The identifier of a specific Amazon Redshift HSM configuration to be
#' described. If no identifier is specified, information is returned for
#' all HSM configurations owned by your AWS customer account.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_hsm_configurations`][redshift_describe_hsm_configurations]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#' @param TagKeys A tag key or keys for which you want to return all matching HSM
#' configurations that are associated with the specified key or keys. For
#' example, suppose that you have HSM configurations that are tagged with
#' keys called `owner` and `environment`. If you specify both of these tag
#' keys in the request, Amazon Redshift returns a response with the HSM
#' configurations that have either or both of these tag keys associated
#' with them.
#' @param TagValues A tag value or values for which you want to return all matching HSM
#' configurations that are associated with the specified tag value or
#' values. For example, suppose that you have HSM configurations that are
#' tagged with values called `admin` and `test`. If you specify both of
#' these tag values in the request, Amazon Redshift returns a response with
#' the HSM configurations that have either or both of these tag values
#' associated with them.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' HsmConfigurations = list(
#' list(
#' HsmConfigurationIdentifier = "string",
#' Description = "string",
#' HsmIpAddress = "string",
#' HsmPartitionName = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_hsm_configurations(
#' HsmConfigurationIdentifier = "string",
#' MaxRecords = 123,
#' Marker = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_hsm_configurations
redshift_describe_hsm_configurations <- function(HsmConfigurationIdentifier = NULL, MaxRecords = NULL, Marker = NULL, TagKeys = NULL, TagValues = NULL) {
  # Describe the DescribeHsmConfigurations API operation.
  operation <- new_operation(
    name = "DescribeHsmConfigurations",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller-supplied parameters into the request shape.
  req_shape <- .redshift$describe_hsm_configurations_input(HsmConfigurationIdentifier = HsmConfigurationIdentifier, MaxRecords = MaxRecords, Marker = Marker, TagKeys = TagKeys, TagValues = TagValues)
  resp_shape <- .redshift$describe_hsm_configurations_output()
  # Build a service client from the current configuration, then execute.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, req_shape, resp_shape))
}
.redshift$operations$describe_hsm_configurations <- redshift_describe_hsm_configurations
#' Describes whether information, such as queries and connection attempts,
#' is being logged for the specified Amazon Redshift cluster
#'
#' @description
#' Describes whether information, such as queries and connection attempts,
#' is being logged for the specified Amazon Redshift cluster.
#'
#' @usage
#' redshift_describe_logging_status(ClusterIdentifier)
#'
#' @param ClusterIdentifier [required] The identifier of the cluster from which to get the logging status.
#'
#' Example: `examplecluster`
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' LoggingEnabled = TRUE|FALSE,
#' BucketName = "string",
#' S3KeyPrefix = "string",
#' LastSuccessfulDeliveryTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' LastFailureTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' LastFailureMessage = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_logging_status(
#' ClusterIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_logging_status
redshift_describe_logging_status <- function(ClusterIdentifier) {
  # Describe the DescribeLoggingStatus API operation.
  operation <- new_operation(
    name = "DescribeLoggingStatus",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the required cluster identifier into the request shape.
  req_shape <- .redshift$describe_logging_status_input(ClusterIdentifier = ClusterIdentifier)
  resp_shape <- .redshift$describe_logging_status_output()
  # Build a service client from the current configuration, then execute.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, req_shape, resp_shape))
}
.redshift$operations$describe_logging_status <- redshift_describe_logging_status
#' Returns properties of possible node configurations such as node type,
#' number of nodes, and disk usage for the specified action type
#'
#' @description
#' Returns properties of possible node configurations such as node type,
#' number of nodes, and disk usage for the specified action type.
#'
#' @usage
#' redshift_describe_node_configuration_options(ActionType,
#' ClusterIdentifier, SnapshotIdentifier, OwnerAccount, Filters, Marker,
#' MaxRecords)
#'
#' @param ActionType [required] The action type to evaluate for possible node configurations. Specify
#' "restore-cluster" to get configuration combinations based on an existing
#' snapshot. Specify "recommend-node-config" to get configuration
#' recommendations based on an existing cluster or snapshot. Specify
#' "resize-cluster" to get configuration combinations for elastic resize
#' based on an existing cluster.
#' @param ClusterIdentifier The identifier of the cluster to evaluate for possible node
#' configurations.
#' @param SnapshotIdentifier The identifier of the snapshot to evaluate for possible node
#' configurations.
#' @param OwnerAccount The AWS customer account used to create or copy the snapshot. Required
#' if you are restoring a snapshot you do not own, optional if you own the
#' snapshot.
#' @param Filters A set of name, operator, and value items to filter the results.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_node_configuration_options`][redshift_describe_node_configuration_options]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `500`
#'
#' Constraints: minimum 100, maximum 500.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' NodeConfigurationOptionList = list(
#' list(
#' NodeType = "string",
#' NumberOfNodes = 123,
#' EstimatedDiskUtilizationPercent = 123.0,
#' Mode = "standard"|"high-performance"
#' )
#' ),
#' Marker = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_node_configuration_options(
#' ActionType = "restore-cluster"|"recommend-node-config"|"resize-cluster",
#' ClusterIdentifier = "string",
#' SnapshotIdentifier = "string",
#' OwnerAccount = "string",
#' Filters = list(
#' list(
#' Name = "NodeType"|"NumberOfNodes"|"EstimatedDiskUtilizationPercent"|"Mode",
#' Operator = "eq"|"lt"|"gt"|"le"|"ge"|"in"|"between",
#' Values = list(
#' "string"
#' )
#' )
#' ),
#' Marker = "string",
#' MaxRecords = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_node_configuration_options
redshift_describe_node_configuration_options <- function(ActionType, ClusterIdentifier = NULL, SnapshotIdentifier = NULL, OwnerAccount = NULL, Filters = NULL, Marker = NULL, MaxRecords = NULL) {
  # Describe the DescribeNodeConfigurationOptions API operation.
  operation <- new_operation(
    name = "DescribeNodeConfigurationOptions",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller-supplied parameters into the request shape.
  req_shape <- .redshift$describe_node_configuration_options_input(ActionType = ActionType, ClusterIdentifier = ClusterIdentifier, SnapshotIdentifier = SnapshotIdentifier, OwnerAccount = OwnerAccount, Filters = Filters, Marker = Marker, MaxRecords = MaxRecords)
  resp_shape <- .redshift$describe_node_configuration_options_output()
  # Build a service client from the current configuration, then execute.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, req_shape, resp_shape))
}
.redshift$operations$describe_node_configuration_options <- redshift_describe_node_configuration_options
#' Returns a list of orderable cluster options
#'
#' @description
#' Returns a list of orderable cluster options. Before you create a new
#' cluster you can use this operation to find what options are available,
#' such as the EC2 Availability Zones (AZ) in the specific AWS Region that
#' you can specify, and the node types you can request. The node types
#' differ by available storage, memory, CPU and price. With the cost
#' involved you might want to obtain a list of cluster options in the
#' specific region and specify values when creating a cluster. For more
#' information about managing clusters, go to [Amazon Redshift
#' Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_describe_orderable_cluster_options(ClusterVersion, NodeType,
#' MaxRecords, Marker)
#'
#' @param ClusterVersion The version filter value. Specify this parameter to show only the
#' available offerings matching the specified version.
#'
#' Default: All versions.
#'
#' Constraints: Must be one of the versions returned from
#' [`describe_cluster_versions`][redshift_describe_cluster_versions].
#' @param NodeType The node type filter value. Specify this parameter to show only the
#' available offerings matching the specified node type.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_orderable_cluster_options`][redshift_describe_orderable_cluster_options]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' OrderableClusterOptions = list(
#' list(
#' ClusterVersion = "string",
#' ClusterType = "string",
#' NodeType = "string",
#' AvailabilityZones = list(
#' list(
#' Name = "string",
#' SupportedPlatforms = list(
#' list(
#' Name = "string"
#' )
#' )
#' )
#' )
#' )
#' ),
#' Marker = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_orderable_cluster_options(
#' ClusterVersion = "string",
#' NodeType = "string",
#' MaxRecords = 123,
#' Marker = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_orderable_cluster_options
redshift_describe_orderable_cluster_options <- function(ClusterVersion = NULL, NodeType = NULL, MaxRecords = NULL, Marker = NULL) {
  # Describe the DescribeOrderableClusterOptions API operation.
  operation <- new_operation(
    name = "DescribeOrderableClusterOptions",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller-supplied parameters into the request shape.
  req_shape <- .redshift$describe_orderable_cluster_options_input(ClusterVersion = ClusterVersion, NodeType = NodeType, MaxRecords = MaxRecords, Marker = Marker)
  resp_shape <- .redshift$describe_orderable_cluster_options_output()
  # Build a service client from the current configuration, then execute.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, req_shape, resp_shape))
}
.redshift$operations$describe_orderable_cluster_options <- redshift_describe_orderable_cluster_options
#' Returns a list of the available reserved node offerings by Amazon
#' Redshift with their descriptions including the node type, the fixed and
#' recurring costs of reserving the node and duration the node will be
#' reserved for you
#'
#' @description
#' Returns a list of the available reserved node offerings by Amazon
#' Redshift with their descriptions including the node type, the fixed and
#' recurring costs of reserving the node and duration the node will be
#' reserved for you. These descriptions help you determine which reserve
#' node offering you want to purchase. You then use the unique offering ID
#' in your call to
#' [`purchase_reserved_node_offering`][redshift_purchase_reserved_node_offering]
#' to reserve one or more nodes for your Amazon Redshift cluster.
#'
#' For more information about reserved node offerings, go to [Purchasing
#' Reserved
#' Nodes](https://docs.aws.amazon.com/redshift/latest/mgmt/purchase-reserved-node-instance.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_describe_reserved_node_offerings(ReservedNodeOfferingId,
#' MaxRecords, Marker)
#'
#' @param ReservedNodeOfferingId The unique identifier for the offering.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_reserved_node_offerings`][redshift_describe_reserved_node_offerings]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' ReservedNodeOfferings = list(
#' list(
#' ReservedNodeOfferingId = "string",
#' NodeType = "string",
#' Duration = 123,
#' FixedPrice = 123.0,
#' UsagePrice = 123.0,
#' CurrencyCode = "string",
#' OfferingType = "string",
#' RecurringCharges = list(
#' list(
#' RecurringChargeAmount = 123.0,
#' RecurringChargeFrequency = "string"
#' )
#' ),
#' ReservedNodeOfferingType = "Regular"|"Upgradable"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_reserved_node_offerings(
#' ReservedNodeOfferingId = "string",
#' MaxRecords = 123,
#' Marker = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_reserved_node_offerings
redshift_describe_reserved_node_offerings <- function(ReservedNodeOfferingId = NULL, MaxRecords = NULL, Marker = NULL) {
  # Describe the DescribeReservedNodeOfferings API operation.
  operation <- new_operation(
    name = "DescribeReservedNodeOfferings",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller-supplied parameters into the request shape.
  req_shape <- .redshift$describe_reserved_node_offerings_input(ReservedNodeOfferingId = ReservedNodeOfferingId, MaxRecords = MaxRecords, Marker = Marker)
  resp_shape <- .redshift$describe_reserved_node_offerings_output()
  # Build a service client from the current configuration, then execute.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, req_shape, resp_shape))
}
.redshift$operations$describe_reserved_node_offerings <- redshift_describe_reserved_node_offerings
#' Returns the descriptions of the reserved nodes
#'
#' @description
#' Returns the descriptions of the reserved nodes.
#'
#' @usage
#' redshift_describe_reserved_nodes(ReservedNodeId, MaxRecords, Marker)
#'
#' @param ReservedNodeId Identifier for the node reservation.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_reserved_nodes`][redshift_describe_reserved_nodes] request
#' exceed the value specified in `MaxRecords`, AWS returns a value in the
#' `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' ReservedNodes = list(
#' list(
#' ReservedNodeId = "string",
#' ReservedNodeOfferingId = "string",
#' NodeType = "string",
#' StartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Duration = 123,
#' FixedPrice = 123.0,
#' UsagePrice = 123.0,
#' CurrencyCode = "string",
#' NodeCount = 123,
#' State = "string",
#' OfferingType = "string",
#' RecurringCharges = list(
#' list(
#' RecurringChargeAmount = 123.0,
#' RecurringChargeFrequency = "string"
#' )
#' ),
#' ReservedNodeOfferingType = "Regular"|"Upgradable"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_reserved_nodes(
#' ReservedNodeId = "string",
#' MaxRecords = 123,
#' Marker = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_reserved_nodes
redshift_describe_reserved_nodes <- function(ReservedNodeId = NULL, MaxRecords = NULL, Marker = NULL) {
  # Describe the DescribeReservedNodes API operation.
  operation <- new_operation(
    name = "DescribeReservedNodes",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller-supplied parameters into the request shape.
  req_shape <- .redshift$describe_reserved_nodes_input(ReservedNodeId = ReservedNodeId, MaxRecords = MaxRecords, Marker = Marker)
  resp_shape <- .redshift$describe_reserved_nodes_output()
  # Build a service client from the current configuration, then execute.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, req_shape, resp_shape))
}
.redshift$operations$describe_reserved_nodes <- redshift_describe_reserved_nodes
#' Returns information about the last resize operation for the specified
#' cluster
#'
#' @description
#' Returns information about the last resize operation for the specified
#' cluster. If no resize operation has ever been initiated for the
#' specified cluster, an `HTTP 404` error is returned. If a resize operation
#' was initiated and completed, the status of the resize remains as
#' `SUCCEEDED` until the next resize.
#'
#' A resize operation can be requested using
#' [`modify_cluster`][redshift_modify_cluster] and specifying a different
#' number or type of nodes for the cluster.
#'
#' @usage
#' redshift_describe_resize(ClusterIdentifier)
#'
#' @param ClusterIdentifier [required] The unique identifier of a cluster whose resize progress you are
#' requesting. This parameter is case-sensitive.
#'
#' By default, resize operations for all clusters defined for an AWS
#' account are returned.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' TargetNodeType = "string",
#' TargetNumberOfNodes = 123,
#' TargetClusterType = "string",
#' Status = "string",
#' ImportTablesCompleted = list(
#' "string"
#' ),
#' ImportTablesInProgress = list(
#' "string"
#' ),
#' ImportTablesNotStarted = list(
#' "string"
#' ),
#' AvgResizeRateInMegaBytesPerSecond = 123.0,
#' TotalResizeDataInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ResizeType = "string",
#' Message = "string",
#' TargetEncryptionType = "string",
#' DataTransferProgressPercent = 123.0
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_resize(
#' ClusterIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_resize
redshift_describe_resize <- function(ClusterIdentifier) {
  # Fetch the status of the last resize operation for the given cluster.
  operation <- new_operation(
    name = "DescribeResize",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the request parameters and declare the expected response shape.
  req_input <- .redshift$describe_resize_input(ClusterIdentifier = ClusterIdentifier)
  resp_shape <- .redshift$describe_resize_output()
  # Resolve the service handle from the ambient configuration and dispatch.
  service <- .redshift$service(get_config())
  send_request(new_request(service, operation, req_input, resp_shape))
}
.redshift$operations$describe_resize <- redshift_describe_resize
#' Describes properties of scheduled actions
#'
#' @description
#' Describes properties of scheduled actions.
#'
#' @usage
#' redshift_describe_scheduled_actions(ScheduledActionName,
#' TargetActionType, StartTime, EndTime, Active, Filters, Marker,
#' MaxRecords)
#'
#' @param ScheduledActionName The name of the scheduled action to retrieve.
#' @param TargetActionType The type of the scheduled actions to retrieve.
#' @param StartTime The start time in UTC of the scheduled actions to retrieve. Only active
#' scheduled actions that have invocations after this time are retrieved.
#' @param EndTime The end time in UTC of the scheduled action to retrieve. Only active
#' scheduled actions that have invocations before this time are retrieved.
#' @param Active If true, retrieve only active scheduled actions. If false, retrieve only
#' disabled scheduled actions.
#' @param Filters List of scheduled action filters.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_scheduled_actions`][redshift_describe_scheduled_actions]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' ScheduledActions = list(
#' list(
#' ScheduledActionName = "string",
#' TargetAction = list(
#' ResizeCluster = list(
#' ClusterIdentifier = "string",
#' ClusterType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' Classic = TRUE|FALSE
#' ),
#' PauseCluster = list(
#' ClusterIdentifier = "string"
#' ),
#' ResumeCluster = list(
#' ClusterIdentifier = "string"
#' )
#' ),
#' Schedule = "string",
#' IamRole = "string",
#' ScheduledActionDescription = "string",
#' State = "ACTIVE"|"DISABLED",
#' NextInvocations = list(
#' as.POSIXct(
#' "2015-01-01"
#' )
#' ),
#' StartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' EndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_scheduled_actions(
#' ScheduledActionName = "string",
#' TargetActionType = "ResizeCluster"|"PauseCluster"|"ResumeCluster",
#' StartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' EndTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Active = TRUE|FALSE,
#' Filters = list(
#' list(
#' Name = "cluster-identifier"|"iam-role",
#' Values = list(
#' "string"
#' )
#' )
#' ),
#' Marker = "string",
#' MaxRecords = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_scheduled_actions
redshift_describe_scheduled_actions <- function(ScheduledActionName = NULL, TargetActionType = NULL, StartTime = NULL, EndTime = NULL, Active = NULL, Filters = NULL, Marker = NULL, MaxRecords = NULL) {
  # Describe scheduled actions (resize/pause/resume) registered on Redshift.
  operation <- new_operation(
    name = "DescribeScheduledActions",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the request parameters and declare the expected response shape.
  req_input <- .redshift$describe_scheduled_actions_input(
    ScheduledActionName = ScheduledActionName,
    TargetActionType = TargetActionType,
    StartTime = StartTime,
    EndTime = EndTime,
    Active = Active,
    Filters = Filters,
    Marker = Marker,
    MaxRecords = MaxRecords
  )
  resp_shape <- .redshift$describe_scheduled_actions_output()
  # Resolve the service handle from the ambient configuration and dispatch.
  service <- .redshift$service(get_config())
  send_request(new_request(service, operation, req_input, resp_shape))
}
.redshift$operations$describe_scheduled_actions <- redshift_describe_scheduled_actions
#' Returns a list of snapshot copy grants owned by the AWS account in the
#' destination region
#'
#' @description
#' Returns a list of snapshot copy grants owned by the AWS account in the
#' destination region.
#'
#' For more information about managing snapshot copy grants, go to [Amazon
#' Redshift Database
#' Encryption](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_describe_snapshot_copy_grants(SnapshotCopyGrantName,
#' MaxRecords, Marker, TagKeys, TagValues)
#'
#' @param SnapshotCopyGrantName The name of the snapshot copy grant.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_snapshot_copy_grants`][redshift_describe_snapshot_copy_grants]
#' request exceed the value specified in `MaxRecords`, AWS returns a value
#' in the `Marker` field of the response. You can retrieve the next set of
#' response records by providing the returned marker value in the `Marker`
#' parameter and retrying the request.
#'
#' Constraints: You can specify either the **SnapshotCopyGrantName**
#' parameter or the **Marker** parameter, but not both.
#' @param TagKeys A tag key or keys for which you want to return all matching resources
#' that are associated with the specified key or keys. For example, suppose
#' that you have resources tagged with keys called `owner` and
#' `environment`. If you specify both of these tag keys in the request,
#' Amazon Redshift returns a response with all resources that have either
#' or both of these tag keys associated with them.
#' @param TagValues A tag value or values for which you want to return all matching
#' resources that are associated with the specified value or values. For
#' example, suppose that you have resources tagged with values called
#' `admin` and `test`. If you specify both of these tag values in the
#' request, Amazon Redshift returns a response with all resources that have
#' either or both of these tag values associated with them.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' SnapshotCopyGrants = list(
#' list(
#' SnapshotCopyGrantName = "string",
#' KmsKeyId = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_snapshot_copy_grants(
#' SnapshotCopyGrantName = "string",
#' MaxRecords = 123,
#' Marker = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_snapshot_copy_grants
redshift_describe_snapshot_copy_grants <- function(SnapshotCopyGrantName = NULL, MaxRecords = NULL, Marker = NULL, TagKeys = NULL, TagValues = NULL) {
  # List snapshot copy grants owned by the account in the destination region.
  operation <- new_operation(
    name = "DescribeSnapshotCopyGrants",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the request parameters and declare the expected response shape.
  req_input <- .redshift$describe_snapshot_copy_grants_input(
    SnapshotCopyGrantName = SnapshotCopyGrantName,
    MaxRecords = MaxRecords,
    Marker = Marker,
    TagKeys = TagKeys,
    TagValues = TagValues
  )
  resp_shape <- .redshift$describe_snapshot_copy_grants_output()
  # Resolve the service handle from the ambient configuration and dispatch.
  service <- .redshift$service(get_config())
  send_request(new_request(service, operation, req_input, resp_shape))
}
.redshift$operations$describe_snapshot_copy_grants <- redshift_describe_snapshot_copy_grants
#' Returns a list of snapshot schedules
#'
#' @description
#' Returns a list of snapshot schedules.
#'
#' @usage
#' redshift_describe_snapshot_schedules(ClusterIdentifier,
#' ScheduleIdentifier, TagKeys, TagValues, Marker, MaxRecords)
#'
#' @param ClusterIdentifier The unique identifier for the cluster whose snapshot schedules you want
#' to view.
#' @param ScheduleIdentifier A unique identifier for a snapshot schedule.
#' @param TagKeys The key value for a snapshot schedule tag.
#' @param TagValues The value corresponding to the key of the snapshot schedule tag.
#' @param Marker A value that indicates the starting point for the next set of response
#' records in a subsequent request. If a value is returned in a response,
#' you can retrieve the next set of records by providing this returned
#' marker value in the `marker` parameter and retrying the command. If the
#' `marker` field is empty, all response records have been retrieved for
#' the request.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned `marker` value.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' SnapshotSchedules = list(
#' list(
#' ScheduleDefinitions = list(
#' "string"
#' ),
#' ScheduleIdentifier = "string",
#' ScheduleDescription = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' NextInvocations = list(
#' as.POSIXct(
#' "2015-01-01"
#' )
#' ),
#' AssociatedClusterCount = 123,
#' AssociatedClusters = list(
#' list(
#' ClusterIdentifier = "string",
#' ScheduleAssociationState = "MODIFYING"|"ACTIVE"|"FAILED"
#' )
#' )
#' )
#' ),
#' Marker = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_snapshot_schedules(
#' ClusterIdentifier = "string",
#' ScheduleIdentifier = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' ),
#' Marker = "string",
#' MaxRecords = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_snapshot_schedules
redshift_describe_snapshot_schedules <- function(ClusterIdentifier = NULL, ScheduleIdentifier = NULL, TagKeys = NULL, TagValues = NULL, Marker = NULL, MaxRecords = NULL) {
  # List snapshot schedules, optionally filtered by cluster, schedule, or tags.
  operation <- new_operation(
    name = "DescribeSnapshotSchedules",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the request parameters and declare the expected response shape.
  req_input <- .redshift$describe_snapshot_schedules_input(
    ClusterIdentifier = ClusterIdentifier,
    ScheduleIdentifier = ScheduleIdentifier,
    TagKeys = TagKeys,
    TagValues = TagValues,
    Marker = Marker,
    MaxRecords = MaxRecords
  )
  resp_shape <- .redshift$describe_snapshot_schedules_output()
  # Resolve the service handle from the ambient configuration and dispatch.
  service <- .redshift$service(get_config())
  send_request(new_request(service, operation, req_input, resp_shape))
}
.redshift$operations$describe_snapshot_schedules <- redshift_describe_snapshot_schedules
#' Returns account level backups storage size and provisional storage
#'
#' @description
#' Returns account level backups storage size and provisional storage.
#'
#' @usage
#' redshift_describe_storage()
#'
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' TotalBackupSizeInMegaBytes = 123.0,
#' TotalProvisionedStorageInMegaBytes = 123.0
#' )
#' ```
#'
#'
#' @keywords internal
#'
#' @rdname redshift_describe_storage
redshift_describe_storage <- function() {
  # Report account-level backup storage size and provisioned storage.
  operation <- new_operation(
    name = "DescribeStorage",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # This operation takes no parameters; the input shape is empty.
  req_input <- .redshift$describe_storage_input()
  resp_shape <- .redshift$describe_storage_output()
  # Resolve the service handle from the ambient configuration and dispatch.
  service <- .redshift$service(get_config())
  send_request(new_request(service, operation, req_input, resp_shape))
}
.redshift$operations$describe_storage <- redshift_describe_storage
#' Lists the status of one or more table restore requests made using the
#' RestoreTableFromClusterSnapshot API action
#'
#' @description
#' Lists the status of one or more table restore requests made using the
#' [`restore_table_from_cluster_snapshot`][redshift_restore_table_from_cluster_snapshot]
#' API action. If you don't specify a value for the `TableRestoreRequestId`
#' parameter, then
#' [`describe_table_restore_status`][redshift_describe_table_restore_status]
#' returns the status of all table restore requests ordered by the date and
#' time of the request in ascending order. Otherwise
#' [`describe_table_restore_status`][redshift_describe_table_restore_status]
#' returns the status of the table specified by `TableRestoreRequestId`.
#'
#' @usage
#' redshift_describe_table_restore_status(ClusterIdentifier,
#' TableRestoreRequestId, MaxRecords, Marker)
#'
#' @param ClusterIdentifier The Amazon Redshift cluster that the table is being restored to.
#' @param TableRestoreRequestId The identifier of the table restore request to return status for. If you
#' don't specify a `TableRestoreRequestId` value, then
#' [`describe_table_restore_status`][redshift_describe_table_restore_status]
#' returns the status of all in-progress table restore requests.
#' @param MaxRecords The maximum number of records to include in the response. If more
#' records exist than the specified `MaxRecords` value, a pagination token
#' called a marker is included in the response so that the remaining
#' results can be retrieved.
#' @param Marker An optional pagination token provided by a previous
#' [`describe_table_restore_status`][redshift_describe_table_restore_status]
#' request. If this parameter is specified, the response includes only
#' records beyond the marker, up to the value specified by the `MaxRecords`
#' parameter.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' TableRestoreStatusDetails = list(
#' list(
#' TableRestoreRequestId = "string",
#' Status = "PENDING"|"IN_PROGRESS"|"SUCCEEDED"|"FAILED"|"CANCELED",
#' Message = "string",
#' RequestTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ProgressInMegaBytes = 123,
#' TotalDataInMegaBytes = 123,
#' ClusterIdentifier = "string",
#' SnapshotIdentifier = "string",
#' SourceDatabaseName = "string",
#' SourceSchemaName = "string",
#' SourceTableName = "string",
#' TargetDatabaseName = "string",
#' TargetSchemaName = "string",
#' NewTableName = "string"
#' )
#' ),
#' Marker = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_table_restore_status(
#' ClusterIdentifier = "string",
#' TableRestoreRequestId = "string",
#' MaxRecords = 123,
#' Marker = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_table_restore_status
redshift_describe_table_restore_status <- function(ClusterIdentifier = NULL, TableRestoreRequestId = NULL, MaxRecords = NULL, Marker = NULL) {
  # Report the status of table restore requests for a cluster.
  operation <- new_operation(
    name = "DescribeTableRestoreStatus",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the request parameters and declare the expected response shape.
  req_input <- .redshift$describe_table_restore_status_input(
    ClusterIdentifier = ClusterIdentifier,
    TableRestoreRequestId = TableRestoreRequestId,
    MaxRecords = MaxRecords,
    Marker = Marker
  )
  resp_shape <- .redshift$describe_table_restore_status_output()
  # Resolve the service handle from the ambient configuration and dispatch.
  service <- .redshift$service(get_config())
  send_request(new_request(service, operation, req_input, resp_shape))
}
.redshift$operations$describe_table_restore_status <- redshift_describe_table_restore_status
#' Returns a list of tags
#'
#' @description
#' Returns a list of tags. You can return tags from a specific resource by
#' specifying an ARN, or you can return all tags for a given type of
#' resource, such as clusters, snapshots, and so on.
#'
#' The following are limitations for
#' [`describe_tags`][redshift_describe_tags]:
#'
#' - You cannot specify an ARN and a resource-type value together in the
#' same request.
#'
#' - You cannot use the `MaxRecords` and `Marker` parameters together
#' with the ARN parameter.
#'
#' - The `MaxRecords` parameter can be a range from 10 to 50 results to
#' return in a request.
#'
#' If you specify both tag keys and tag values in the same request, Amazon
#' Redshift returns all resources that match any combination of the
#' specified keys and values. For example, if you have `owner` and
#' `environment` for tag keys, and `admin` and `test` for tag values, all
#' resources that have any combination of those values are returned.
#'
#' If both tag keys and values are omitted from the request, resources are
#' returned regardless of whether they have tag keys or values associated
#' with them.
#'
#' @usage
#' redshift_describe_tags(ResourceName, ResourceType, MaxRecords, Marker,
#' TagKeys, TagValues)
#'
#' @param ResourceName The Amazon Resource Name (ARN) for which you want to describe the tag or
#' tags. For example, `arn:aws:redshift:us-east-2:123456789:cluster:t1`.
#' @param ResourceType The type of resource with which you want to view tags. Valid resource
#' types are:
#'
#' - Cluster
#'
#' - CIDR/IP
#'
#' - EC2 security group
#'
#' - Snapshot
#'
#' - Cluster security group
#'
#' - Subnet group
#'
#' - HSM connection
#'
#' - HSM certificate
#'
#' - Parameter group
#'
#' - Snapshot copy grant
#'
#' For more information about Amazon Redshift resource types and
#' constructing ARNs, go to [Specifying Policy Elements: Actions, Effects,
#' Resources, and
#' Principals](https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-overview.html#redshift-iam-access-control-specify-actions)
#' in the Amazon Redshift Cluster Management Guide.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned `marker` value.
#' @param Marker A value that indicates the starting point for the next set of response
#' records in a subsequent request. If a value is returned in a response,
#' you can retrieve the next set of records by providing this returned
#' marker value in the `marker` parameter and retrying the command. If the
#' `marker` field is empty, all response records have been retrieved for
#' the request.
#' @param TagKeys A tag key or keys for which you want to return all matching resources
#' that are associated with the specified key or keys. For example, suppose
#' that you have resources tagged with keys called `owner` and
#' `environment`. If you specify both of these tag keys in the request,
#' Amazon Redshift returns a response with all resources that have either
#' or both of these tag keys associated with them.
#' @param TagValues A tag value or values for which you want to return all matching
#' resources that are associated with the specified value or values. For
#' example, suppose that you have resources tagged with values called
#' `admin` and `test`. If you specify both of these tag values in the
#' request, Amazon Redshift returns a response with all resources that have
#' either or both of these tag values associated with them.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' TaggedResources = list(
#' list(
#' Tag = list(
#' Key = "string",
#' Value = "string"
#' ),
#' ResourceName = "string",
#' ResourceType = "string"
#' )
#' ),
#' Marker = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_tags(
#' ResourceName = "string",
#' ResourceType = "string",
#' MaxRecords = 123,
#' Marker = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_tags
redshift_describe_tags <- function(ResourceName = NULL, ResourceType = NULL, MaxRecords = NULL, Marker = NULL, TagKeys = NULL, TagValues = NULL) {
  # List tags for a specific ARN or for all resources of a given type.
  operation <- new_operation(
    name = "DescribeTags",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the request parameters and declare the expected response shape.
  req_input <- .redshift$describe_tags_input(
    ResourceName = ResourceName,
    ResourceType = ResourceType,
    MaxRecords = MaxRecords,
    Marker = Marker,
    TagKeys = TagKeys,
    TagValues = TagValues
  )
  resp_shape <- .redshift$describe_tags_output()
  # Resolve the service handle from the ambient configuration and dispatch.
  service <- .redshift$service(get_config())
  send_request(new_request(service, operation, req_input, resp_shape))
}
.redshift$operations$describe_tags <- redshift_describe_tags
#' Shows usage limits on a cluster
#'
#' @description
#' Shows usage limits on a cluster. Results are filtered based on the
#' combination of input usage limit identifier, cluster identifier, and
#' feature type parameters:
#'
#' - If usage limit identifier, cluster identifier, and feature type are
#' not provided, then all usage limit objects for the current account
#' in the current region are returned.
#'
#' - If usage limit identifier is provided, then the corresponding usage
#' limit object is returned.
#'
#' - If cluster identifier is provided, then all usage limit objects for
#' the specified cluster are returned.
#'
#' - If cluster identifier and feature type are provided, then all usage
#' limit objects for the combination of cluster and feature are
#' returned.
#'
#' @usage
#' redshift_describe_usage_limits(UsageLimitId, ClusterIdentifier,
#' FeatureType, MaxRecords, Marker, TagKeys, TagValues)
#'
#' @param UsageLimitId The identifier of the usage limit to describe.
#' @param ClusterIdentifier The identifier of the cluster for which you want to describe usage
#' limits.
#' @param FeatureType The feature type for which you want to describe usage limits.
#' @param MaxRecords The maximum number of response records to return in each call. If the
#' number of remaining response records exceeds the specified `MaxRecords`
#' value, a value is returned in a `marker` field of the response. You can
#' retrieve the next set of records by retrying the command with the
#' returned marker value.
#'
#' Default: `100`
#'
#' Constraints: minimum 20, maximum 100.
#' @param Marker An optional parameter that specifies the starting point to return a set
#' of response records. When the results of a
#' [`describe_usage_limits`][redshift_describe_usage_limits] request exceed
#' the value specified in `MaxRecords`, AWS returns a value in the `Marker`
#' field of the response. You can retrieve the next set of response records
#' by providing the returned marker value in the `Marker` parameter and
#' retrying the request.
#' @param TagKeys A tag key or keys for which you want to return all matching usage limit
#' objects that are associated with the specified key or keys. For example,
#' suppose that you have parameter groups that are tagged with keys called
#' `owner` and `environment`. If you specify both of these tag keys in the
#' request, Amazon Redshift returns a response with the usage limit objects
#' that have either or both of these tag keys associated with them.
#' @param TagValues A tag value or values for which you want to return all matching usage
#' limit objects that are associated with the specified tag value or
#' values. For example, suppose that you have parameter groups that are
#' tagged with values called `admin` and `test`. If you specify both of
#' these tag values in the request, Amazon Redshift returns a response with
#' the usage limit objects that have either or both of these tag values
#' associated with them.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' UsageLimits = list(
#' list(
#' UsageLimitId = "string",
#' ClusterIdentifier = "string",
#' FeatureType = "spectrum"|"concurrency-scaling",
#' LimitType = "time"|"data-scanned",
#' Amount = 123,
#' Period = "daily"|"weekly"|"monthly",
#' BreachAction = "log"|"emit-metric"|"disable",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ),
#' Marker = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_usage_limits(
#' UsageLimitId = "string",
#' ClusterIdentifier = "string",
#' FeatureType = "spectrum"|"concurrency-scaling",
#' MaxRecords = 123,
#' Marker = "string",
#' TagKeys = list(
#' "string"
#' ),
#' TagValues = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_describe_usage_limits
redshift_describe_usage_limits <- function(UsageLimitId = NULL, ClusterIdentifier = NULL, FeatureType = NULL, MaxRecords = NULL, Marker = NULL, TagKeys = NULL, TagValues = NULL) {
  # Show usage limits filtered by limit id, cluster, feature type, or tags.
  operation <- new_operation(
    name = "DescribeUsageLimits",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the request parameters and declare the expected response shape.
  req_input <- .redshift$describe_usage_limits_input(
    UsageLimitId = UsageLimitId,
    ClusterIdentifier = ClusterIdentifier,
    FeatureType = FeatureType,
    MaxRecords = MaxRecords,
    Marker = Marker,
    TagKeys = TagKeys,
    TagValues = TagValues
  )
  resp_shape <- .redshift$describe_usage_limits_output()
  # Resolve the service handle from the ambient configuration and dispatch.
  service <- .redshift$service(get_config())
  send_request(new_request(service, operation, req_input, resp_shape))
}
.redshift$operations$describe_usage_limits <- redshift_describe_usage_limits
#' Stops logging information, such as queries and connection attempts, for
#' the specified Amazon Redshift cluster
#'
#' @description
#' Stops logging information, such as queries and connection attempts, for
#' the specified Amazon Redshift cluster.
#'
#' @usage
#' redshift_disable_logging(ClusterIdentifier)
#'
#' @param ClusterIdentifier [required] The identifier of the cluster on which logging is to be stopped.
#'
#' Example: `examplecluster`
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' LoggingEnabled = TRUE|FALSE,
#' BucketName = "string",
#' S3KeyPrefix = "string",
#' LastSuccessfulDeliveryTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' LastFailureTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' LastFailureMessage = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$disable_logging(
#' ClusterIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_disable_logging
redshift_disable_logging <- function(ClusterIdentifier) {
  # Stop audit logging (queries, connection attempts) for the given cluster.
  operation <- new_operation(
    name = "DisableLogging",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the request parameters and declare the expected response shape.
  req_input <- .redshift$disable_logging_input(ClusterIdentifier = ClusterIdentifier)
  resp_shape <- .redshift$disable_logging_output()
  # Resolve the service handle from the ambient configuration and dispatch.
  service <- .redshift$service(get_config())
  send_request(new_request(service, operation, req_input, resp_shape))
}
.redshift$operations$disable_logging <- redshift_disable_logging
#' Disables the automatic copying of snapshots from one region to another
#' region for a specified cluster
#'
#' @description
#' Disables the automatic copying of snapshots from one region to another
#' region for a specified cluster.
#'
#' If your cluster and its snapshots are encrypted using a customer master
#' key (CMK) from AWS KMS, use
#' [`delete_snapshot_copy_grant`][redshift_delete_snapshot_copy_grant] to
#' delete the grant that grants Amazon Redshift permission to the CMK in
#' the destination region.
#'
#' @usage
#' redshift_disable_snapshot_copy(ClusterIdentifier)
#'
#' @param ClusterIdentifier [required] The unique identifier of the source cluster that you want to disable
#' copying of snapshots to a destination region.
#'
#' Constraints: Must be the valid name of an existing cluster that has
#' cross-region snapshot copy enabled.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$disable_snapshot_copy(
#' ClusterIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_disable_snapshot_copy
redshift_disable_snapshot_copy <- function(ClusterIdentifier) {
  # Resolve the service client from the active paws configuration.
  cfg <- get_config()
  client <- .redshift$service(cfg)
  # Describe the DisableSnapshotCopy API call: an unpaginated POST to "/".
  operation <- new_operation(
    name = "DisableSnapshotCopy",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  request_input <- .redshift$disable_snapshot_copy_input(
    ClusterIdentifier = ClusterIdentifier
  )
  request_output <- .redshift$disable_snapshot_copy_output()
  # Build the signed request and return the parsed service response.
  send_request(new_request(client, operation, request_input, request_output))
}
.redshift$operations$disable_snapshot_copy <- redshift_disable_snapshot_copy
#' Starts logging information, such as queries and connection attempts, for
#' the specified Amazon Redshift cluster
#'
#' @description
#' Starts logging information, such as queries and connection attempts, for
#' the specified Amazon Redshift cluster.
#'
#' @usage
#' redshift_enable_logging(ClusterIdentifier, BucketName, S3KeyPrefix)
#'
#' @param ClusterIdentifier [required] The identifier of the cluster on which logging is to be started.
#'
#' Example: `examplecluster`
#' @param BucketName [required] The name of an existing S3 bucket where the log files are to be stored.
#'
#' Constraints:
#'
#' - Must be in the same region as the cluster
#'
#' - The cluster must have read bucket and put object permissions
#' @param S3KeyPrefix The prefix applied to the log file names.
#'
#' Constraints:
#'
#' - Cannot exceed 512 characters
#'
#' - Cannot contain spaces ( ), double quotes ("), single quotes ('), a
#'   backslash (\\), or control characters. The hexadecimal codes for
#' invalid characters are:
#'
#' - x00 to x20
#'
#' - x22
#'
#' - x27
#'
#' - x5c
#'
#' - x7f or larger
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' LoggingEnabled = TRUE|FALSE,
#' BucketName = "string",
#' S3KeyPrefix = "string",
#' LastSuccessfulDeliveryTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' LastFailureTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' LastFailureMessage = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$enable_logging(
#' ClusterIdentifier = "string",
#' BucketName = "string",
#' S3KeyPrefix = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_enable_logging
redshift_enable_logging <- function(ClusterIdentifier, BucketName, S3KeyPrefix = NULL) {
  # Resolve the service client from the active paws configuration.
  cfg <- get_config()
  client <- .redshift$service(cfg)
  # Describe the EnableLogging API call: an unpaginated POST to "/".
  operation <- new_operation(
    name = "EnableLogging",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  request_input <- .redshift$enable_logging_input(
    ClusterIdentifier = ClusterIdentifier,
    BucketName = BucketName,
    S3KeyPrefix = S3KeyPrefix
  )
  request_output <- .redshift$enable_logging_output()
  # Build the signed request and return the parsed service response.
  send_request(new_request(client, operation, request_input, request_output))
}
.redshift$operations$enable_logging <- redshift_enable_logging
#' Enables the automatic copy of snapshots from one region to another
#' region for a specified cluster
#'
#' @description
#' Enables the automatic copy of snapshots from one region to another
#' region for a specified cluster.
#'
#' @usage
#' redshift_enable_snapshot_copy(ClusterIdentifier, DestinationRegion,
#' RetentionPeriod, SnapshotCopyGrantName, ManualSnapshotRetentionPeriod)
#'
#' @param ClusterIdentifier [required] The unique identifier of the source cluster to copy snapshots from.
#'
#' Constraints: Must be the valid name of an existing cluster that does not
#' already have cross-region snapshot copy enabled.
#' @param DestinationRegion [required] The destination AWS Region that you want to copy snapshots to.
#'
#' Constraints: Must be the name of a valid AWS Region. For more
#' information, see [Regions and
#' Endpoints](https://docs.aws.amazon.com/general/latest/gr/rande.html#redshift_region)
#' in the Amazon Web Services General Reference.
#' @param RetentionPeriod The number of days to retain automated snapshots in the destination
#' region after they are copied from the source region.
#'
#' Default: 7.
#'
#' Constraints: Must be at least 1 and no more than 35.
#' @param SnapshotCopyGrantName The name of the snapshot copy grant to use when snapshots of an AWS
#' KMS-encrypted cluster are copied to the destination region.
#' @param ManualSnapshotRetentionPeriod The number of days to retain newly copied snapshots in the destination
#' AWS Region after they are copied from the source AWS Region. If the
#' value is -1, the manual snapshot is retained indefinitely.
#'
#' The value must be either -1 or an integer between 1 and 3,653.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$enable_snapshot_copy(
#' ClusterIdentifier = "string",
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' SnapshotCopyGrantName = "string",
#' ManualSnapshotRetentionPeriod = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_enable_snapshot_copy
redshift_enable_snapshot_copy <- function(ClusterIdentifier, DestinationRegion, RetentionPeriod = NULL, SnapshotCopyGrantName = NULL, ManualSnapshotRetentionPeriod = NULL) {
  # Resolve the service client from the active paws configuration.
  cfg <- get_config()
  client <- .redshift$service(cfg)
  # Describe the EnableSnapshotCopy API call: an unpaginated POST to "/".
  operation <- new_operation(
    name = "EnableSnapshotCopy",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  request_input <- .redshift$enable_snapshot_copy_input(
    ClusterIdentifier = ClusterIdentifier,
    DestinationRegion = DestinationRegion,
    RetentionPeriod = RetentionPeriod,
    SnapshotCopyGrantName = SnapshotCopyGrantName,
    ManualSnapshotRetentionPeriod = ManualSnapshotRetentionPeriod
  )
  request_output <- .redshift$enable_snapshot_copy_output()
  # Build the signed request and return the parsed service response.
  send_request(new_request(client, operation, request_input, request_output))
}
.redshift$operations$enable_snapshot_copy <- redshift_enable_snapshot_copy
#' Returns a database user name and temporary password with temporary
#' authorization to log on to an Amazon Redshift database
#'
#' @description
#' Returns a database user name and temporary password with temporary
#' authorization to log on to an Amazon Redshift database. The action
#' returns the database user name prefixed with `IAM:` if `AutoCreate` is
#' `False` or `IAMA:` if `AutoCreate` is `True`. You can optionally specify
#' one or more database user groups that the user will join at log on. By
#' default, the temporary credentials expire in 900 seconds. You can
#' optionally specify a duration between 900 seconds (15 minutes) and 3600
#' seconds (60 minutes). For more information, see [Using IAM
#' Authentication to Generate Database User
#' Credentials](https://docs.aws.amazon.com/redshift/latest/mgmt/generating-user-credentials.html)
#' in the Amazon Redshift Cluster Management Guide.
#'
#' The AWS Identity and Access Management (IAM) user or role that executes
#' GetClusterCredentials must have an IAM policy attached that allows
#' access to all necessary actions and resources. For more information
#' about permissions, see [Resource Policies for
#' GetClusterCredentials](https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html#redshift-policy-resources.getclustercredentials-resources)
#' in the Amazon Redshift Cluster Management Guide.
#'
#' If the `DbGroups` parameter is specified, the IAM policy must allow the
#' `redshift:JoinGroup` action with access to the listed `dbgroups`.
#'
#' In addition, if the `AutoCreate` parameter is set to `True`, then the
#' policy must include the `redshift:CreateClusterUser` privilege.
#'
#' If the `DbName` parameter is specified, the IAM policy must allow access
#' to the resource `dbname` for the specified database name.
#'
#' @usage
#' redshift_get_cluster_credentials(DbUser, DbName, ClusterIdentifier,
#' DurationSeconds, AutoCreate, DbGroups)
#'
#' @param DbUser [required] The name of a database user. If a user name matching `DbUser` exists in
#' the database, the temporary user credentials have the same permissions
#' as the existing user. If `DbUser` doesn't exist in the database and
#' `Autocreate` is `True`, a new user is created using the value for
#' `DbUser` with PUBLIC permissions. If a database user matching the value
#' for `DbUser` doesn't exist and `Autocreate` is `False`, then the command
#' succeeds but the connection attempt will fail because the user doesn't
#' exist in the database.
#'
#' For more information, see [CREATE
#' USER](https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html)
#' in the Amazon Redshift Database Developer Guide.
#'
#' Constraints:
#'
#' - Must be 1 to 64 alphanumeric characters or hyphens. The user name
#' can't be `PUBLIC`.
#'
#' - Must contain only lowercase letters, numbers, underscore, plus sign,
#' period (dot), at symbol (@@), or hyphen.
#'
#' - First character must be a letter.
#'
#' - Must not contain a colon ( : ) or slash ( / ).
#'
#' - Cannot be a reserved word. A list of reserved words can be found in
#' [Reserved
#' Words](https://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html)
#' in the Amazon Redshift Database Developer Guide.
#' @param DbName The name of a database that `DbUser` is authorized to log on to. If
#' `DbName` is not specified, `DbUser` can log on to any existing database.
#'
#' Constraints:
#'
#' - Must be 1 to 64 alphanumeric characters or hyphens
#'
#' - Must contain only lowercase letters, numbers, underscore, plus sign,
#' period (dot), at symbol (@@), or hyphen.
#'
#' - First character must be a letter.
#'
#' - Must not contain a colon ( : ) or slash ( / ).
#'
#' - Cannot be a reserved word. A list of reserved words can be found in
#' [Reserved
#' Words](https://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html)
#' in the Amazon Redshift Database Developer Guide.
#' @param ClusterIdentifier [required] The unique identifier of the cluster that contains the database for
#' which you are requesting credentials. This parameter is case sensitive.
#' @param DurationSeconds The number of seconds until the returned temporary password expires.
#'
#' Constraint: minimum 900, maximum 3600.
#'
#' Default: 900
#' @param AutoCreate Create a database user with the name specified for the user named in
#' `DbUser` if one does not exist.
#' @param DbGroups A list of the names of existing database groups that the user named in
#' `DbUser` will join for the current session, in addition to any group
#' memberships for an existing user. If not specified, a new user is added
#' only to PUBLIC.
#'
#' Database group name constraints
#'
#' - Must be 1 to 64 alphanumeric characters or hyphens
#'
#' - Must contain only lowercase letters, numbers, underscore, plus sign,
#' period (dot), at symbol (@@), or hyphen.
#'
#' - First character must be a letter.
#'
#' - Must not contain a colon ( : ) or slash ( / ).
#'
#' - Cannot be a reserved word. A list of reserved words can be found in
#' [Reserved
#' Words](https://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html)
#' in the Amazon Redshift Database Developer Guide.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' DbUser = "string",
#' DbPassword = "string",
#' Expiration = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$get_cluster_credentials(
#' DbUser = "string",
#' DbName = "string",
#' ClusterIdentifier = "string",
#' DurationSeconds = 123,
#' AutoCreate = TRUE|FALSE,
#' DbGroups = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_get_cluster_credentials
redshift_get_cluster_credentials <- function(DbUser, DbName = NULL, ClusterIdentifier, DurationSeconds = NULL, AutoCreate = NULL, DbGroups = NULL) {
  # Resolve the service client from the active paws configuration.
  cfg <- get_config()
  client <- .redshift$service(cfg)
  # Describe the GetClusterCredentials API call: an unpaginated POST to "/".
  operation <- new_operation(
    name = "GetClusterCredentials",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  request_input <- .redshift$get_cluster_credentials_input(
    DbUser = DbUser,
    DbName = DbName,
    ClusterIdentifier = ClusterIdentifier,
    DurationSeconds = DurationSeconds,
    AutoCreate = AutoCreate,
    DbGroups = DbGroups
  )
  request_output <- .redshift$get_cluster_credentials_output()
  # Build the signed request and return the parsed service response.
  send_request(new_request(client, operation, request_input, request_output))
}
.redshift$operations$get_cluster_credentials <- redshift_get_cluster_credentials
#' Returns an array of DC2 ReservedNodeOfferings that matches the payment
#' type, term, and usage price of the given DC1 reserved node
#'
#' @description
#' Returns an array of DC2 ReservedNodeOfferings that matches the payment
#' type, term, and usage price of the given DC1 reserved node.
#'
#' @usage
#' redshift_get_reserved_node_exchange_offerings(ReservedNodeId,
#' MaxRecords, Marker)
#'
#' @param ReservedNodeId [required] A string representing the node identifier for the DC1 Reserved Node to
#' be exchanged.
#' @param MaxRecords An integer setting the maximum number of ReservedNodeOfferings to
#' retrieve.
#' @param Marker A value that indicates the starting point for the next set of
#' ReservedNodeOfferings.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Marker = "string",
#' ReservedNodeOfferings = list(
#' list(
#' ReservedNodeOfferingId = "string",
#' NodeType = "string",
#' Duration = 123,
#' FixedPrice = 123.0,
#' UsagePrice = 123.0,
#' CurrencyCode = "string",
#' OfferingType = "string",
#' RecurringCharges = list(
#' list(
#' RecurringChargeAmount = 123.0,
#' RecurringChargeFrequency = "string"
#' )
#' ),
#' ReservedNodeOfferingType = "Regular"|"Upgradable"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$get_reserved_node_exchange_offerings(
#' ReservedNodeId = "string",
#' MaxRecords = 123,
#' Marker = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_get_reserved_node_exchange_offerings
redshift_get_reserved_node_exchange_offerings <- function(ReservedNodeId, MaxRecords = NULL, Marker = NULL) {
  # Resolve the service client from the active paws configuration.
  cfg <- get_config()
  client <- .redshift$service(cfg)
  # Describe the GetReservedNodeExchangeOfferings API call: an unpaginated
  # POST to "/".
  operation <- new_operation(
    name = "GetReservedNodeExchangeOfferings",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  request_input <- .redshift$get_reserved_node_exchange_offerings_input(
    ReservedNodeId = ReservedNodeId,
    MaxRecords = MaxRecords,
    Marker = Marker
  )
  request_output <- .redshift$get_reserved_node_exchange_offerings_output()
  # Build the signed request and return the parsed service response.
  send_request(new_request(client, operation, request_input, request_output))
}
.redshift$operations$get_reserved_node_exchange_offerings <- redshift_get_reserved_node_exchange_offerings
#' Modifies the settings for a cluster
#'
#' @description
#' Modifies the settings for a cluster.
#'
#' You can also change node type and the number of nodes to scale up or
#' down the cluster. When resizing a cluster, you must specify both the
#' number of nodes and the node type even if one of the parameters does not
#' change.
#'
#' You can add another security or parameter group, or change the master
#' user password. Resetting a cluster password or modifying the security
#' groups associated with a cluster do not need a reboot. However,
#' modifying a parameter group requires a reboot for parameters to take
#' effect. For more information about managing clusters, go to [Amazon
#' Redshift
#' Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_modify_cluster(ClusterIdentifier, ClusterType, NodeType,
#' NumberOfNodes, ClusterSecurityGroups, VpcSecurityGroupIds,
#' MasterUserPassword, ClusterParameterGroupName,
#' AutomatedSnapshotRetentionPeriod, ManualSnapshotRetentionPeriod,
#' PreferredMaintenanceWindow, ClusterVersion, AllowVersionUpgrade,
#' HsmClientCertificateIdentifier, HsmConfigurationIdentifier,
#' NewClusterIdentifier, PubliclyAccessible, ElasticIp, EnhancedVpcRouting,
#' MaintenanceTrackName, Encrypted, KmsKeyId, AvailabilityZoneRelocation,
#' AvailabilityZone, Port)
#'
#' @param ClusterIdentifier [required] The unique identifier of the cluster to be modified.
#'
#' Example: `examplecluster`
#' @param ClusterType The new cluster type.
#'
#' When you submit your cluster resize request, your existing cluster goes
#' into a read-only mode. After Amazon Redshift provisions a new cluster
#' based on your resize requirements, there will be outage for a period
#' while the old cluster is deleted and your connection is switched to the
#' new cluster. You can use [`describe_resize`][redshift_describe_resize]
#' to track the progress of the resize request.
#'
#' Valid Values: ` multi-node | single-node `
#' @param NodeType The new node type of the cluster. If you specify a new node type, you
#' must also specify the number of nodes parameter.
#'
#' For more information about resizing clusters, go to [Resizing Clusters
#' in Amazon
#' Redshift](https://docs.aws.amazon.com/redshift/latest/mgmt/managing-cluster-operations.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' Valid Values: `ds2.xlarge` | `ds2.8xlarge` | `dc1.large` | `dc1.8xlarge`
#' | `dc2.large` | `dc2.8xlarge` | `ra3.xlplus` | `ra3.4xlarge` |
#' `ra3.16xlarge`
#' @param NumberOfNodes The new number of nodes of the cluster. If you specify a new number of
#' nodes, you must also specify the node type parameter.
#'
#' For more information about resizing clusters, go to [Resizing Clusters
#' in Amazon
#' Redshift](https://docs.aws.amazon.com/redshift/latest/mgmt/managing-cluster-operations.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' Valid Values: Integer greater than `0`.
#' @param ClusterSecurityGroups A list of cluster security groups to be authorized on this cluster. This
#' change is asynchronously applied as soon as possible.
#'
#' Security groups currently associated with the cluster, and not in the
#' list of groups to apply, will be revoked from the cluster.
#'
#' Constraints:
#'
#' - Must be 1 to 255 alphanumeric characters or hyphens
#'
#' - First character must be a letter
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens
#' @param VpcSecurityGroupIds A list of virtual private cloud (VPC) security groups to be associated
#' with the cluster. This change is asynchronously applied as soon as
#' possible.
#' @param MasterUserPassword The new password for the cluster master user. This change is
#' asynchronously applied as soon as possible. Between the time of the
#' request and the completion of the request, the `MasterUserPassword`
#' element exists in the `PendingModifiedValues` element of the operation
#' response.
#'
#' Operations never return the password, so this operation provides a way
#' to regain access to the master user account for a cluster if the
#' password is lost.
#'
#' Default: Uses existing setting.
#'
#' Constraints:
#'
#' - Must be between 8 and 64 characters in length.
#'
#' - Must contain at least one uppercase letter.
#'
#' - Must contain at least one lowercase letter.
#'
#' - Must contain one number.
#'
#' - Can be any printable ASCII character (ASCII code 33 to 126) except '
#' (single quote), " (double quote), \\, /, @@, or space.
#' @param ClusterParameterGroupName The name of the cluster parameter group to apply to this cluster. This
#' change is applied only after the cluster is rebooted. To reboot a
#' cluster use [`reboot_cluster`][redshift_reboot_cluster].
#'
#' Default: Uses existing setting.
#'
#' Constraints: The cluster parameter group must be in the same parameter
#' group family that matches the cluster version.
#' @param AutomatedSnapshotRetentionPeriod The number of days that automated snapshots are retained. If the value
#' is 0, automated snapshots are disabled. Even if automated snapshots are
#' disabled, you can still create manual snapshots when you want with
#' [`create_cluster_snapshot`][redshift_create_cluster_snapshot].
#'
#' If you decrease the automated snapshot retention period from its current
#' value, existing automated snapshots that fall outside of the new
#' retention period will be immediately deleted.
#'
#' Default: Uses existing setting.
#'
#' Constraints: Must be a value from 0 to 35.
#' @param ManualSnapshotRetentionPeriod The default for number of days that a newly created manual snapshot is
#' retained. If the value is -1, the manual snapshot is retained
#' indefinitely. This value doesn't retroactively change the retention
#' periods of existing manual snapshots.
#'
#' The value must be either -1 or an integer between 1 and 3,653.
#'
#' The default value is -1.
#' @param PreferredMaintenanceWindow The weekly time range (in UTC) during which system maintenance can
#' occur, if necessary. If system maintenance is necessary during the
#' window, it may result in an outage.
#'
#' This maintenance window change is made immediately. If the new
#' maintenance window indicates the current time, there must be at least
#' 120 minutes between the current time and end of the window in order to
#' ensure that pending changes are applied.
#'
#' Default: Uses existing setting.
#'
#' Format: ddd:hh24:mi-ddd:hh24:mi, for example `wed:07:30-wed:08:00`.
#'
#' Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
#'
#' Constraints: Must be at least 30 minutes.
#' @param ClusterVersion The new version number of the Amazon Redshift engine to upgrade to.
#'
#' For major version upgrades, if a non-default cluster parameter group is
#' currently in use, a new cluster parameter group in the cluster parameter
#' group family for the new version must be specified. The new cluster
#' parameter group can be the default for that cluster parameter group
#' family. For more information about parameters and parameter groups, go
#' to [Amazon Redshift Parameter
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' Example: `1.0`
#' @param AllowVersionUpgrade If `true`, major version upgrades will be applied automatically to the
#' cluster during the maintenance window.
#'
#' Default: `false`
#' @param HsmClientCertificateIdentifier Specifies the name of the HSM client certificate the Amazon Redshift
#' cluster uses to retrieve the data encryption keys stored in an HSM.
#' @param HsmConfigurationIdentifier Specifies the name of the HSM configuration that contains the
#' information the Amazon Redshift cluster can use to retrieve and store
#' keys in an HSM.
#' @param NewClusterIdentifier The new identifier for the cluster.
#'
#' Constraints:
#'
#' - Must contain from 1 to 63 alphanumeric characters or hyphens.
#'
#' - Alphabetic characters must be lowercase.
#'
#' - First character must be a letter.
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens.
#'
#' - Must be unique for all clusters within an AWS account.
#'
#' Example: `examplecluster`
#' @param PubliclyAccessible If `true`, the cluster can be accessed from a public network. Only
#' clusters in VPCs can be set to be publicly available.
#' @param ElasticIp The Elastic IP (EIP) address for the cluster.
#'
#' Constraints: The cluster must be provisioned in EC2-VPC and
#' publicly-accessible through an Internet gateway. For more information
#' about provisioning clusters in EC2-VPC, go to [Supported Platforms to
#' Launch Your
#' Cluster](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#cluster-platforms)
#' in the Amazon Redshift Cluster Management Guide.
#' @param EnhancedVpcRouting An option that specifies whether to create the cluster with enhanced VPC
#' routing enabled. To create a cluster that uses enhanced VPC routing, the
#' cluster must be in a VPC. For more information, see [Enhanced VPC
#' Routing](https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html)
#' in the Amazon Redshift Cluster Management Guide.
#'
#' If this option is `true`, enhanced VPC routing is enabled.
#'
#' Default: false
#' @param MaintenanceTrackName The name for the maintenance track that you want to assign for the
#' cluster. This name change is asynchronous. The new track name stays in
#' the `PendingModifiedValues` for the cluster until the next maintenance
#' window. When the maintenance track changes, the cluster is switched to
#' the latest cluster release available for the maintenance track. At this
#' point, the maintenance track name is applied.
#' @param Encrypted Indicates whether the cluster is encrypted. If the value is encrypted
#' (true) and you provide a value for the `KmsKeyId` parameter, we encrypt
#' the cluster with the provided `KmsKeyId`. If you don't provide a
#' `KmsKeyId`, we encrypt with the default key.
#'
#' If the value is not encrypted (false), then the cluster is decrypted.
#' @param KmsKeyId The AWS Key Management Service (KMS) key ID of the encryption key that
#' you want to use to encrypt data in the cluster.
#' @param AvailabilityZoneRelocation The option to enable relocation for an Amazon Redshift cluster between
#' Availability Zones after the cluster modification is complete.
#' @param AvailabilityZone The option to initiate relocation for an Amazon Redshift cluster to the
#' target Availability Zone.
#' @param Port The option to change the port of an Amazon Redshift cluster.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_cluster(
#' ClusterIdentifier = "string",
#' ClusterType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterSecurityGroups = list(
#' "string"
#' ),
#' VpcSecurityGroupIds = list(
#' "string"
#' ),
#' MasterUserPassword = "string",
#' ClusterParameterGroupName = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' PreferredMaintenanceWindow = "string",
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' NewClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' ElasticIp = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' Encrypted = TRUE|FALSE,
#' KmsKeyId = "string",
#' AvailabilityZoneRelocation = TRUE|FALSE,
#' AvailabilityZone = "string",
#' Port = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_cluster
redshift_modify_cluster <- function(ClusterIdentifier, ClusterType = NULL, NodeType = NULL, NumberOfNodes = NULL, ClusterSecurityGroups = NULL, VpcSecurityGroupIds = NULL, MasterUserPassword = NULL, ClusterParameterGroupName = NULL, AutomatedSnapshotRetentionPeriod = NULL, ManualSnapshotRetentionPeriod = NULL, PreferredMaintenanceWindow = NULL, ClusterVersion = NULL, AllowVersionUpgrade = NULL, HsmClientCertificateIdentifier = NULL, HsmConfigurationIdentifier = NULL, NewClusterIdentifier = NULL, PubliclyAccessible = NULL, ElasticIp = NULL, EnhancedVpcRouting = NULL, MaintenanceTrackName = NULL, Encrypted = NULL, KmsKeyId = NULL, AvailabilityZoneRelocation = NULL, AvailabilityZone = NULL, Port = NULL) {
  # Descriptor for the Redshift ModifyCluster action: query-style API,
  # POST to the service root, no paginator.
  operation <- new_operation(
    name = "ModifyCluster",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller's arguments into the operation's input shape.
  request_body <- .redshift$modify_cluster_input(ClusterIdentifier = ClusterIdentifier, ClusterType = ClusterType, NodeType = NodeType, NumberOfNodes = NumberOfNodes, ClusterSecurityGroups = ClusterSecurityGroups, VpcSecurityGroupIds = VpcSecurityGroupIds, MasterUserPassword = MasterUserPassword, ClusterParameterGroupName = ClusterParameterGroupName, AutomatedSnapshotRetentionPeriod = AutomatedSnapshotRetentionPeriod, ManualSnapshotRetentionPeriod = ManualSnapshotRetentionPeriod, PreferredMaintenanceWindow = PreferredMaintenanceWindow, ClusterVersion = ClusterVersion, AllowVersionUpgrade = AllowVersionUpgrade, HsmClientCertificateIdentifier = HsmClientCertificateIdentifier, HsmConfigurationIdentifier = HsmConfigurationIdentifier, NewClusterIdentifier = NewClusterIdentifier, PubliclyAccessible = PubliclyAccessible, ElasticIp = ElasticIp, EnhancedVpcRouting = EnhancedVpcRouting, MaintenanceTrackName = MaintenanceTrackName, Encrypted = Encrypted, KmsKeyId = KmsKeyId, AvailabilityZoneRelocation = AvailabilityZoneRelocation, AvailabilityZone = AvailabilityZone, Port = Port)
  # Build a client from the caller's current configuration, assemble the
  # request with the expected output shape, and dispatch it.
  client <- .redshift$service(get_config())
  req <- new_request(client, operation, request_body, .redshift$modify_cluster_output())
  send_request(req)
}
.redshift$operations$modify_cluster <- redshift_modify_cluster
#' Modifies the database revision of a cluster
#'
#' @description
#' Modifies the database revision of a cluster. The database revision is a
#' unique revision of the database running in a cluster.
#'
#' @usage
#' redshift_modify_cluster_db_revision(ClusterIdentifier, RevisionTarget)
#'
#' @param ClusterIdentifier [required] The unique identifier of a cluster whose database revision you want to
#' modify.
#'
#' Example: `examplecluster`
#' @param RevisionTarget [required] The identifier of the database revision. You can retrieve this value
#' from the response to the
#' [`describe_cluster_db_revisions`][redshift_describe_cluster_db_revisions]
#' request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_cluster_db_revision(
#' ClusterIdentifier = "string",
#' RevisionTarget = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_cluster_db_revision
redshift_modify_cluster_db_revision <- function(ClusterIdentifier, RevisionTarget) {
  # Descriptor for the Redshift ModifyClusterDbRevision action
  # (query API: POST to "/", no pagination).
  descriptor <- new_operation(
    name = "ModifyClusterDbRevision",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Resolve client configuration before building the request.
  cfg <- get_config()
  client <- .redshift$service(cfg)
  # Marshal arguments into the input shape; pair with the output shape.
  payload <- .redshift$modify_cluster_db_revision_input(ClusterIdentifier = ClusterIdentifier, RevisionTarget = RevisionTarget)
  result_shape <- .redshift$modify_cluster_db_revision_output()
  send_request(new_request(client, descriptor, payload, result_shape))
}
.redshift$operations$modify_cluster_db_revision <- redshift_modify_cluster_db_revision
#' Modifies the list of AWS Identity and Access Management (IAM) roles that
#' can be used by the cluster to access other AWS services
#'
#' @description
#' Modifies the list of AWS Identity and Access Management (IAM) roles that
#' can be used by the cluster to access other AWS services.
#'
#' A cluster can have up to 10 IAM roles associated at any time.
#'
#' @usage
#' redshift_modify_cluster_iam_roles(ClusterIdentifier, AddIamRoles,
#' RemoveIamRoles)
#'
#' @param ClusterIdentifier [required] The unique identifier of the cluster for which you want to associate or
#' disassociate IAM roles.
#' @param AddIamRoles Zero or more IAM roles to associate with the cluster. The roles must be
#' in their Amazon Resource Name (ARN) format. You can associate up to 10
#' IAM roles with a single cluster in a single request.
#' @param RemoveIamRoles Zero or more IAM roles in ARN format to disassociate from the cluster.
#' You can disassociate up to 10 IAM roles from a single cluster in a
#' single request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_cluster_iam_roles(
#' ClusterIdentifier = "string",
#' AddIamRoles = list(
#' "string"
#' ),
#' RemoveIamRoles = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_cluster_iam_roles
redshift_modify_cluster_iam_roles <- function(ClusterIdentifier, AddIamRoles = NULL, RemoveIamRoles = NULL) {
  # Marshal arguments into the ModifyClusterIamRoles input shape.
  payload <- .redshift$modify_cluster_iam_roles_input(ClusterIdentifier = ClusterIdentifier, AddIamRoles = AddIamRoles, RemoveIamRoles = RemoveIamRoles)
  # Operation descriptor: query-style API posted to the service root.
  descriptor <- new_operation(
    name = "ModifyClusterIamRoles",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Construct a service client from the active configuration and send.
  client <- .redshift$service(get_config())
  req <- new_request(client, descriptor, payload, .redshift$modify_cluster_iam_roles_output())
  send_request(req)
}
.redshift$operations$modify_cluster_iam_roles <- redshift_modify_cluster_iam_roles
#' Modifies the maintenance settings of a cluster
#'
#' @description
#' Modifies the maintenance settings of a cluster.
#'
#' @usage
#' redshift_modify_cluster_maintenance(ClusterIdentifier, DeferMaintenance,
#' DeferMaintenanceIdentifier, DeferMaintenanceStartTime,
#' DeferMaintenanceEndTime, DeferMaintenanceDuration)
#'
#' @param ClusterIdentifier [required] A unique identifier for the cluster.
#' @param DeferMaintenance A boolean indicating whether to enable the deferred maintenance window.
#' @param DeferMaintenanceIdentifier A unique identifier for the deferred maintenance window.
#' @param DeferMaintenanceStartTime A timestamp indicating the start time for the deferred maintenance
#' window.
#' @param DeferMaintenanceEndTime A timestamp indicating the end time for the deferred maintenance window.
#' If you specify an end time, you can't specify a duration.
#' @param DeferMaintenanceDuration An integer indicating the duration of the maintenance window in days. If
#' you specify a duration, you can't specify an end time. The duration must
#' be 45 days or less.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_cluster_maintenance(
#' ClusterIdentifier = "string",
#' DeferMaintenance = TRUE|FALSE,
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceDuration = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_cluster_maintenance
redshift_modify_cluster_maintenance <- function(ClusterIdentifier, DeferMaintenance = NULL, DeferMaintenanceIdentifier = NULL, DeferMaintenanceStartTime = NULL, DeferMaintenanceEndTime = NULL, DeferMaintenanceDuration = NULL) {
  # Describe the ModifyClusterMaintenance call (POST to "/", unpaginated).
  descriptor <- new_operation(
    name = "ModifyClusterMaintenance",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the deferred-maintenance arguments into the input shape.
  payload <- .redshift$modify_cluster_maintenance_input(ClusterIdentifier = ClusterIdentifier, DeferMaintenance = DeferMaintenance, DeferMaintenanceIdentifier = DeferMaintenanceIdentifier, DeferMaintenanceStartTime = DeferMaintenanceStartTime, DeferMaintenanceEndTime = DeferMaintenanceEndTime, DeferMaintenanceDuration = DeferMaintenanceDuration)
  result_shape <- .redshift$modify_cluster_maintenance_output()
  # Build the client, assemble the request, and dispatch it.
  client <- .redshift$service(get_config())
  send_request(new_request(client, descriptor, payload, result_shape))
}
.redshift$operations$modify_cluster_maintenance <- redshift_modify_cluster_maintenance
#' Modifies the parameters of a parameter group
#'
#' @description
#' Modifies the parameters of a parameter group.
#'
#' For more information about parameters and parameter groups, go to
#' [Amazon Redshift Parameter
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_modify_cluster_parameter_group(ParameterGroupName, Parameters)
#'
#' @param ParameterGroupName [required] The name of the parameter group to be modified.
#' @param Parameters [required] An array of parameters to be modified. A maximum of 20 parameters can be
#' modified in a single request.
#'
#' For each parameter to be modified, you must supply at least the
#' parameter name and parameter value; other name-value pairs of the
#' parameter are optional.
#'
#' For the workload management (WLM) configuration, you must supply all the
#' name-value pairs in the wlm_json_configuration parameter.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ParameterGroupName = "string",
#' ParameterGroupStatus = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_cluster_parameter_group(
#' ParameterGroupName = "string",
#' Parameters = list(
#' list(
#' ParameterName = "string",
#' ParameterValue = "string",
#' Description = "string",
#' Source = "string",
#' DataType = "string",
#' AllowedValues = "string",
#' ApplyType = "static"|"dynamic",
#' IsModifiable = TRUE|FALSE,
#' MinimumEngineVersion = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_cluster_parameter_group
redshift_modify_cluster_parameter_group <- function(ParameterGroupName, Parameters) {
  # Resolve configuration and build the service client up front.
  client <- .redshift$service(get_config())
  # Operation descriptor for ModifyClusterParameterGroup (POST, no pager).
  descriptor <- new_operation(
    name = "ModifyClusterParameterGroup",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the parameter-group name and parameter list into the input shape.
  payload <- .redshift$modify_cluster_parameter_group_input(ParameterGroupName = ParameterGroupName, Parameters = Parameters)
  req <- new_request(client, descriptor, payload, .redshift$modify_cluster_parameter_group_output())
  send_request(req)
}
.redshift$operations$modify_cluster_parameter_group <- redshift_modify_cluster_parameter_group
#' Modifies the settings for a snapshot
#'
#' @description
#' Modifies the settings for a snapshot.
#'
#' This example modifies the manual retention period setting for a cluster
#' snapshot.
#'
#' @usage
#' redshift_modify_cluster_snapshot(SnapshotIdentifier,
#' ManualSnapshotRetentionPeriod, Force)
#'
#' @param SnapshotIdentifier [required] The identifier of the snapshot whose setting you want to modify.
#' @param ManualSnapshotRetentionPeriod The number of days that a manual snapshot is retained. If the value is
#' -1, the manual snapshot is retained indefinitely.
#'
#' If the manual snapshot falls outside of the new retention period, you
#' can specify the force option to immediately delete the snapshot.
#'
#' The value must be either -1 or an integer between 1 and 3,653.
#' @param Force A Boolean option to override an exception if the retention period has
#' already passed.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Snapshot = list(
#' SnapshotIdentifier = "string",
#' ClusterIdentifier = "string",
#' SnapshotCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Status = "string",
#' Port = 123,
#' AvailabilityZone = "string",
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' MasterUsername = "string",
#' ClusterVersion = "string",
#' EngineFullVersion = "string",
#' SnapshotType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' DBName = "string",
#' VpcId = "string",
#' Encrypted = TRUE|FALSE,
#' KmsKeyId = "string",
#' EncryptedWithHSM = TRUE|FALSE,
#' AccountsWithRestoreAccess = list(
#' list(
#' AccountId = "string",
#' AccountAlias = "string"
#' )
#' ),
#' OwnerAccount = "string",
#' TotalBackupSizeInMegaBytes = 123.0,
#' ActualIncrementalBackupSizeInMegaBytes = 123.0,
#' BackupProgressInMegaBytes = 123.0,
#' CurrentBackupRateInMegaBytesPerSecond = 123.0,
#' EstimatedSecondsToCompletion = 123,
#' ElapsedTimeInSeconds = 123,
#' SourceRegion = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' RestorableNodeTypes = list(
#' "string"
#' ),
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' ManualSnapshotRetentionPeriod = 123,
#' ManualSnapshotRemainingDays = 123,
#' SnapshotRetentionStartTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_cluster_snapshot(
#' SnapshotIdentifier = "string",
#' ManualSnapshotRetentionPeriod = 123,
#' Force = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_cluster_snapshot
redshift_modify_cluster_snapshot <- function(SnapshotIdentifier, ManualSnapshotRetentionPeriod = NULL, Force = NULL) {
  # Describe the ModifyClusterSnapshot action: query API, POST to "/",
  # no pagination.
  descriptor <- new_operation(
    name = "ModifyClusterSnapshot",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize arguments into the request's input shape.
  payload <- .redshift$modify_cluster_snapshot_input(SnapshotIdentifier = SnapshotIdentifier, ManualSnapshotRetentionPeriod = ManualSnapshotRetentionPeriod, Force = Force)
  # Build a client for the current configuration and send the request.
  cfg <- get_config()
  client <- .redshift$service(cfg)
  send_request(new_request(client, descriptor, payload, .redshift$modify_cluster_snapshot_output()))
}
.redshift$operations$modify_cluster_snapshot <- redshift_modify_cluster_snapshot
#' Modifies a snapshot schedule for a cluster
#'
#' @description
#' Modifies a snapshot schedule for a cluster.
#'
#' @usage
#' redshift_modify_cluster_snapshot_schedule(ClusterIdentifier,
#' ScheduleIdentifier, DisassociateSchedule)
#'
#' @param ClusterIdentifier [required] A unique identifier for the cluster whose snapshot schedule you want to
#' modify.
#' @param ScheduleIdentifier A unique alphanumeric identifier for the schedule that you want to
#' associate with the cluster.
#' @param DisassociateSchedule A boolean to indicate whether to remove the association between the
#' cluster and the schedule.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$modify_cluster_snapshot_schedule(
#' ClusterIdentifier = "string",
#' ScheduleIdentifier = "string",
#' DisassociateSchedule = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_cluster_snapshot_schedule
redshift_modify_cluster_snapshot_schedule <- function(ClusterIdentifier, ScheduleIdentifier = NULL, DisassociateSchedule = NULL) {
  # Marshal arguments into the ModifyClusterSnapshotSchedule input shape.
  payload <- .redshift$modify_cluster_snapshot_schedule_input(ClusterIdentifier = ClusterIdentifier, ScheduleIdentifier = ScheduleIdentifier, DisassociateSchedule = DisassociateSchedule)
  result_shape <- .redshift$modify_cluster_snapshot_schedule_output()
  # Operation descriptor: POST to the service root, unpaginated.
  descriptor <- new_operation(
    name = "ModifyClusterSnapshotSchedule",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Build the client from the active configuration and dispatch.
  client <- .redshift$service(get_config())
  send_request(new_request(client, descriptor, payload, result_shape))
}
.redshift$operations$modify_cluster_snapshot_schedule <- redshift_modify_cluster_snapshot_schedule
#' Modifies a cluster subnet group to include the specified list of VPC
#' subnets
#'
#' @description
#' Modifies a cluster subnet group to include the specified list of VPC
#' subnets. The operation replaces the existing list of subnets with the
#' new list of subnets.
#'
#' @usage
#' redshift_modify_cluster_subnet_group(ClusterSubnetGroupName,
#' Description, SubnetIds)
#'
#' @param ClusterSubnetGroupName [required] The name of the subnet group to be modified.
#' @param Description A text description of the subnet group to be modified.
#' @param SubnetIds [required] An array of VPC subnet IDs. A maximum of 20 subnets can be modified in a
#' single request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ClusterSubnetGroup = list(
#' ClusterSubnetGroupName = "string",
#' Description = "string",
#' VpcId = "string",
#' SubnetGroupStatus = "string",
#' Subnets = list(
#' list(
#' SubnetIdentifier = "string",
#' SubnetAvailabilityZone = list(
#' Name = "string",
#' SupportedPlatforms = list(
#' list(
#' Name = "string"
#' )
#' )
#' ),
#' SubnetStatus = "string"
#' )
#' ),
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_cluster_subnet_group(
#' ClusterSubnetGroupName = "string",
#' Description = "string",
#' SubnetIds = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_cluster_subnet_group
redshift_modify_cluster_subnet_group <- function(ClusterSubnetGroupName, Description = NULL, SubnetIds) {
  # NOTE: the required `SubnetIds` argument follows the optional
  # `Description` in the generated signature; the order is kept for
  # backward compatibility with positional callers.
  # Describe the ModifyClusterSubnetGroup call (POST to "/", no pager).
  descriptor <- new_operation(
    name = "ModifyClusterSubnetGroup",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize arguments into the input shape and pair with the output shape.
  payload <- .redshift$modify_cluster_subnet_group_input(ClusterSubnetGroupName = ClusterSubnetGroupName, Description = Description, SubnetIds = SubnetIds)
  result_shape <- .redshift$modify_cluster_subnet_group_output()
  # Build a service client for the current configuration and send.
  client <- .redshift$service(get_config())
  send_request(new_request(client, descriptor, payload, result_shape))
}
.redshift$operations$modify_cluster_subnet_group <- redshift_modify_cluster_subnet_group
#' Modifies an existing Amazon Redshift event notification subscription
#'
#' @description
#' Modifies an existing Amazon Redshift event notification subscription.
#'
#' @usage
#' redshift_modify_event_subscription(SubscriptionName, SnsTopicArn,
#' SourceType, SourceIds, EventCategories, Severity, Enabled)
#'
#' @param SubscriptionName [required] The name of the modified Amazon Redshift event notification
#' subscription.
#' @param SnsTopicArn The Amazon Resource Name (ARN) of the SNS topic to be used by the event
#' notification subscription.
#' @param SourceType The type of source that will be generating the events. For example, if
#' you want to be notified of events generated by a cluster, you would set
#' this parameter to cluster. If this value is not specified, events are
#' returned for all Amazon Redshift objects in your AWS account. You must
#' specify a source type in order to specify source IDs.
#'
#' Valid values: cluster, cluster-parameter-group, cluster-security-group,
#' cluster-snapshot, and scheduled-action.
#' @param SourceIds A list of one or more identifiers of Amazon Redshift source objects. All
#' of the objects must be of the same type as was specified in the source
#' type parameter. The event subscription will return only events generated
#' by the specified objects. If not specified, then events are returned for
#' all objects within the source type specified.
#'
#' Example: my-cluster-1, my-cluster-2
#'
#' Example: my-snapshot-20131010
#' @param EventCategories Specifies the Amazon Redshift event categories to be published by the
#' event notification subscription.
#'
#' Values: configuration, management, monitoring, security
#' @param Severity Specifies the Amazon Redshift event severity to be published by the
#' event notification subscription.
#'
#' Values: ERROR, INFO
#' @param Enabled A Boolean value indicating if the subscription is enabled. `true`
#' indicates the subscription is enabled
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' EventSubscription = list(
#' CustomerAwsId = "string",
#' CustSubscriptionId = "string",
#' SnsTopicArn = "string",
#' Status = "string",
#' SubscriptionCreationTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' SourceType = "string",
#' SourceIdsList = list(
#' "string"
#' ),
#' EventCategoriesList = list(
#' "string"
#' ),
#' Severity = "string",
#' Enabled = TRUE|FALSE,
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_event_subscription(
#' SubscriptionName = "string",
#' SnsTopicArn = "string",
#' SourceType = "string",
#' SourceIds = list(
#' "string"
#' ),
#' EventCategories = list(
#' "string"
#' ),
#' Severity = "string",
#' Enabled = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_event_subscription
redshift_modify_event_subscription <- function(SubscriptionName, SnsTopicArn = NULL, SourceType = NULL, SourceIds = NULL, EventCategories = NULL, Severity = NULL, Enabled = NULL) {
  # Operation descriptor for ModifyEventSubscription.
  operation <- new_operation(
    name = "ModifyEventSubscription",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the inputs; optional arguments default to NULL and are omitted
  # from the wire format by the input serializer.
  request_input <- .redshift$modify_event_subscription_input(
    SubscriptionName = SubscriptionName,
    SnsTopicArn = SnsTopicArn,
    SourceType = SourceType,
    SourceIds = SourceIds,
    EventCategories = EventCategories,
    Severity = Severity,
    Enabled = Enabled
  )
  request_output <- .redshift$modify_event_subscription_output()
  # Construct the service client from the active config and send the call.
  service <- .redshift$service(get_config())
  send_request(new_request(service, operation, request_input, request_output))
}
.redshift$operations$modify_event_subscription <- redshift_modify_event_subscription
#' Modifies a scheduled action
#'
#' @description
#' Modifies a scheduled action.
#'
#' @usage
#' redshift_modify_scheduled_action(ScheduledActionName, TargetAction,
#' Schedule, IamRole, ScheduledActionDescription, StartTime, EndTime,
#' Enable)
#'
#' @param ScheduledActionName [required] The name of the scheduled action to modify.
#' @param TargetAction A modified JSON format of the scheduled action. For more information
#' about this parameter, see ScheduledAction.
#' @param Schedule A modified schedule in either `at( )` or `cron( )` format. For more
#' information about this parameter, see ScheduledAction.
#' @param IamRole A different IAM role to assume to run the target action. For more
#' information about this parameter, see ScheduledAction.
#' @param ScheduledActionDescription A modified description of the scheduled action.
#' @param StartTime A modified start time of the scheduled action. For more information
#' about this parameter, see ScheduledAction.
#' @param EndTime A modified end time of the scheduled action. For more information about
#' this parameter, see ScheduledAction.
#' @param Enable A modified enable flag of the scheduled action. If true, the scheduled
#' action is active. If false, the scheduled action is disabled.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ScheduledActionName = "string",
#' TargetAction = list(
#' ResizeCluster = list(
#' ClusterIdentifier = "string",
#' ClusterType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' Classic = TRUE|FALSE
#' ),
#' PauseCluster = list(
#' ClusterIdentifier = "string"
#' ),
#' ResumeCluster = list(
#' ClusterIdentifier = "string"
#' )
#' ),
#' Schedule = "string",
#' IamRole = "string",
#' ScheduledActionDescription = "string",
#' State = "ACTIVE"|"DISABLED",
#' NextInvocations = list(
#' as.POSIXct(
#' "2015-01-01"
#' )
#' ),
#' StartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' EndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_scheduled_action(
#' ScheduledActionName = "string",
#' TargetAction = list(
#' ResizeCluster = list(
#' ClusterIdentifier = "string",
#' ClusterType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' Classic = TRUE|FALSE
#' ),
#' PauseCluster = list(
#' ClusterIdentifier = "string"
#' ),
#' ResumeCluster = list(
#' ClusterIdentifier = "string"
#' )
#' ),
#' Schedule = "string",
#' IamRole = "string",
#' ScheduledActionDescription = "string",
#' StartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' EndTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Enable = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_scheduled_action
redshift_modify_scheduled_action <- function(ScheduledActionName, TargetAction = NULL, Schedule = NULL, IamRole = NULL, ScheduledActionDescription = NULL, StartTime = NULL, EndTime = NULL, Enable = NULL) {
  # Operation descriptor for ModifyScheduledAction.
  operation <- new_operation(
    name = "ModifyScheduledAction",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request; every field but the action name is optional.
  request_input <- .redshift$modify_scheduled_action_input(
    ScheduledActionName = ScheduledActionName,
    TargetAction = TargetAction,
    Schedule = Schedule,
    IamRole = IamRole,
    ScheduledActionDescription = ScheduledActionDescription,
    StartTime = StartTime,
    EndTime = EndTime,
    Enable = Enable
  )
  request_output <- .redshift$modify_scheduled_action_output()
  # Build the client from the current configuration and issue the request.
  service <- .redshift$service(get_config())
  send_request(new_request(service, operation, request_input, request_output))
}
.redshift$operations$modify_scheduled_action <- redshift_modify_scheduled_action
#' Modifies the number of days to retain snapshots in the destination AWS
#' Region after they are copied from the source AWS Region
#'
#' @description
#' Modifies the number of days to retain snapshots in the destination AWS
#' Region after they are copied from the source AWS Region. By default,
#' this operation only changes the retention period of copied automated
#' snapshots. The retention periods for both new and existing copied
#' automated snapshots are updated with the new retention period. You can
#' set the manual option to change only the retention periods of copied
#' manual snapshots. If you set this option, only newly copied manual
#' snapshots have the new retention period.
#'
#' @usage
#' redshift_modify_snapshot_copy_retention_period(ClusterIdentifier,
#' RetentionPeriod, Manual)
#'
#' @param ClusterIdentifier [required] The unique identifier of the cluster for which you want to change the
#' retention period for either automated or manual snapshots that are
#' copied to a destination AWS Region.
#'
#' Constraints: Must be the valid name of an existing cluster that has
#' cross-region snapshot copy enabled.
#' @param RetentionPeriod [required] The number of days to retain automated snapshots in the destination AWS
#' Region after they are copied from the source AWS Region.
#'
#' By default, this only changes the retention period of copied automated
#' snapshots.
#'
#' If you decrease the retention period for automated snapshots that are
#' copied to a destination AWS Region, Amazon Redshift deletes any existing
#' automated snapshots that were copied to the destination AWS Region and
#' that fall outside of the new retention period.
#'
#' Constraints: Must be at least 1 and no more than 35 for automated
#' snapshots.
#'
#' If you specify the `manual` option, only newly copied manual snapshots
#' will have the new retention period.
#'
#' If you specify the value of -1 newly copied manual snapshots are
#' retained indefinitely.
#'
#' Constraints: The number of days must be either -1 or an integer between
#' 1 and 3,653 for manual snapshots.
#' @param Manual Indicates whether to apply the snapshot retention period to newly copied
#' manual snapshots instead of automated snapshots.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_snapshot_copy_retention_period(
#' ClusterIdentifier = "string",
#' RetentionPeriod = 123,
#' Manual = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_snapshot_copy_retention_period
redshift_modify_snapshot_copy_retention_period <- function(ClusterIdentifier, RetentionPeriod, Manual = NULL) {
  # Operation descriptor for ModifySnapshotCopyRetentionPeriod.
  operation <- new_operation(
    name = "ModifySnapshotCopyRetentionPeriod",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal inputs; `Manual` is optional and left out when NULL.
  request_input <- .redshift$modify_snapshot_copy_retention_period_input(
    ClusterIdentifier = ClusterIdentifier,
    RetentionPeriod = RetentionPeriod,
    Manual = Manual
  )
  request_output <- .redshift$modify_snapshot_copy_retention_period_output()
  # Resolve configuration, assemble the request, and send it.
  service <- .redshift$service(get_config())
  send_request(new_request(service, operation, request_input, request_output))
}
.redshift$operations$modify_snapshot_copy_retention_period <- redshift_modify_snapshot_copy_retention_period
#' Modifies a snapshot schedule
#'
#' @description
#' Modifies a snapshot schedule. Any schedule associated with a cluster is
#' modified asynchronously.
#'
#' @usage
#' redshift_modify_snapshot_schedule(ScheduleIdentifier,
#' ScheduleDefinitions)
#'
#' @param ScheduleIdentifier [required] A unique alphanumeric identifier of the schedule to modify.
#' @param ScheduleDefinitions [required] An updated list of schedule definitions. A schedule definition is made
#' up of schedule expressions, for example, "cron(30 12 *)" or "rate(12
#' hours)".
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ScheduleDefinitions = list(
#' "string"
#' ),
#' ScheduleIdentifier = "string",
#' ScheduleDescription = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' NextInvocations = list(
#' as.POSIXct(
#' "2015-01-01"
#' )
#' ),
#' AssociatedClusterCount = 123,
#' AssociatedClusters = list(
#' list(
#' ClusterIdentifier = "string",
#' ScheduleAssociationState = "MODIFYING"|"ACTIVE"|"FAILED"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_snapshot_schedule(
#' ScheduleIdentifier = "string",
#' ScheduleDefinitions = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_snapshot_schedule
redshift_modify_snapshot_schedule <- function(ScheduleIdentifier, ScheduleDefinitions) {
  # Operation descriptor for ModifySnapshotSchedule.
  operation <- new_operation(
    name = "ModifySnapshotSchedule",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Both parameters are required by the API; marshal them directly.
  request_input <- .redshift$modify_snapshot_schedule_input(
    ScheduleIdentifier = ScheduleIdentifier,
    ScheduleDefinitions = ScheduleDefinitions
  )
  request_output <- .redshift$modify_snapshot_schedule_output()
  # Build the service client from the current config and dispatch.
  service <- .redshift$service(get_config())
  send_request(new_request(service, operation, request_input, request_output))
}
.redshift$operations$modify_snapshot_schedule <- redshift_modify_snapshot_schedule
#' Modifies a usage limit in a cluster
#'
#' @description
#' Modifies a usage limit in a cluster. You can't modify the feature type
#' or period of a usage limit.
#'
#' @usage
#' redshift_modify_usage_limit(UsageLimitId, Amount, BreachAction)
#'
#' @param UsageLimitId [required] The identifier of the usage limit to modify.
#' @param Amount The new limit amount. For more information about this parameter, see
#' UsageLimit.
#' @param BreachAction The new action that Amazon Redshift takes when the limit is reached. For
#' more information about this parameter, see UsageLimit.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' UsageLimitId = "string",
#' ClusterIdentifier = "string",
#' FeatureType = "spectrum"|"concurrency-scaling",
#' LimitType = "time"|"data-scanned",
#' Amount = 123,
#' Period = "daily"|"weekly"|"monthly",
#' BreachAction = "log"|"emit-metric"|"disable",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$modify_usage_limit(
#' UsageLimitId = "string",
#' Amount = 123,
#' BreachAction = "log"|"emit-metric"|"disable"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_modify_usage_limit
redshift_modify_usage_limit <- function(UsageLimitId, Amount = NULL, BreachAction = NULL) {
  # Operation descriptor for ModifyUsageLimit.
  operation <- new_operation(
    name = "ModifyUsageLimit",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal inputs; `Amount` and `BreachAction` are optional.
  request_input <- .redshift$modify_usage_limit_input(
    UsageLimitId = UsageLimitId,
    Amount = Amount,
    BreachAction = BreachAction
  )
  request_output <- .redshift$modify_usage_limit_output()
  # Resolve configuration, build the request, and send it to the service.
  service <- .redshift$service(get_config())
  send_request(new_request(service, operation, request_input, request_output))
}
.redshift$operations$modify_usage_limit <- redshift_modify_usage_limit
#' Pauses a cluster
#'
#' @description
#' Pauses a cluster.
#'
#' @usage
#' redshift_pause_cluster(ClusterIdentifier)
#'
#' @param ClusterIdentifier [required] The identifier of the cluster to be paused.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$pause_cluster(
#' ClusterIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_pause_cluster
redshift_pause_cluster <- function(ClusterIdentifier) {
  # Operation descriptor for PauseCluster.
  operation <- new_operation(
    name = "PauseCluster",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # The only input is the identifier of the cluster to pause.
  request_input <- .redshift$pause_cluster_input(
    ClusterIdentifier = ClusterIdentifier
  )
  request_output <- .redshift$pause_cluster_output()
  # Construct the client from the active configuration and dispatch.
  service <- .redshift$service(get_config())
  send_request(new_request(service, operation, request_input, request_output))
}
.redshift$operations$pause_cluster <- redshift_pause_cluster
#' Allows you to purchase reserved nodes
#'
#' @description
#' Allows you to purchase reserved nodes. Amazon Redshift offers a
#' predefined set of reserved node offerings. You can purchase one or more
#' of the offerings. You can call the
#' [`describe_reserved_node_offerings`][redshift_describe_reserved_node_offerings]
#' API to obtain the available reserved node offerings. You can call this
#' API by providing a specific reserved node offering and the number of
#' nodes you want to reserve.
#'
#' For more information about reserved node offerings, go to [Purchasing
#' Reserved
#' Nodes](https://docs.aws.amazon.com/redshift/latest/mgmt/purchase-reserved-node-instance.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_purchase_reserved_node_offering(ReservedNodeOfferingId,
#' NodeCount)
#'
#' @param ReservedNodeOfferingId [required] The unique identifier of the reserved node offering you want to
#' purchase.
#' @param NodeCount The number of reserved nodes that you want to purchase.
#'
#' Default: `1`
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ReservedNode = list(
#' ReservedNodeId = "string",
#' ReservedNodeOfferingId = "string",
#' NodeType = "string",
#' StartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Duration = 123,
#' FixedPrice = 123.0,
#' UsagePrice = 123.0,
#' CurrencyCode = "string",
#' NodeCount = 123,
#' State = "string",
#' OfferingType = "string",
#' RecurringCharges = list(
#' list(
#' RecurringChargeAmount = 123.0,
#' RecurringChargeFrequency = "string"
#' )
#' ),
#' ReservedNodeOfferingType = "Regular"|"Upgradable"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$purchase_reserved_node_offering(
#' ReservedNodeOfferingId = "string",
#' NodeCount = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_purchase_reserved_node_offering
redshift_purchase_reserved_node_offering <- function(ReservedNodeOfferingId, NodeCount = NULL) {
  # Operation descriptor for PurchaseReservedNodeOffering.
  operation <- new_operation(
    name = "PurchaseReservedNodeOffering",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal inputs; `NodeCount` is optional (service default applies).
  request_input <- .redshift$purchase_reserved_node_offering_input(
    ReservedNodeOfferingId = ReservedNodeOfferingId,
    NodeCount = NodeCount
  )
  request_output <- .redshift$purchase_reserved_node_offering_output()
  # Build the client from the current config and issue the request.
  service <- .redshift$service(get_config())
  send_request(new_request(service, operation, request_input, request_output))
}
.redshift$operations$purchase_reserved_node_offering <- redshift_purchase_reserved_node_offering
#' Reboots a cluster
#'
#' @description
#' Reboots a cluster. This action is taken as soon as possible. It results
#' in a momentary outage to the cluster, during which the cluster status is
#' set to `rebooting`. A cluster event is created when the reboot is
#' completed. Any pending cluster modifications (see
#' [`modify_cluster`][redshift_modify_cluster]) are applied at this reboot.
#' For more information about managing clusters, go to [Amazon Redshift
#' Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_reboot_cluster(ClusterIdentifier)
#'
#' @param ClusterIdentifier [required] The cluster identifier.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$reboot_cluster(
#' ClusterIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_reboot_cluster
redshift_reboot_cluster <- function(ClusterIdentifier) {
  # Operation descriptor for RebootCluster.
  operation <- new_operation(
    name = "RebootCluster",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # The single required input is the cluster identifier.
  request_input <- .redshift$reboot_cluster_input(
    ClusterIdentifier = ClusterIdentifier
  )
  request_output <- .redshift$reboot_cluster_output()
  # Resolve configuration, construct the request, and dispatch it.
  service <- .redshift$service(get_config())
  send_request(new_request(service, operation, request_input, request_output))
}
.redshift$operations$reboot_cluster <- redshift_reboot_cluster
#' Sets one or more parameters of the specified parameter group to their
#' default values and sets the source values of the parameters to
#' "engine-default"
#'
#' @description
#' Sets one or more parameters of the specified parameter group to their
#' default values and sets the source values of the parameters to
#' "engine-default". To reset the entire parameter group specify the
#' *ResetAllParameters* parameter. For parameter changes to take effect you
#' must reboot any associated clusters.
#'
#' @usage
#' redshift_reset_cluster_parameter_group(ParameterGroupName,
#' ResetAllParameters, Parameters)
#'
#' @param ParameterGroupName [required] The name of the cluster parameter group to be reset.
#' @param ResetAllParameters If `true`, all parameters in the specified parameter group will be reset
#' to their default values.
#'
#' Default: `true`
#' @param Parameters An array of names of parameters to be reset. If *ResetAllParameters*
#' option is not used, then at least one parameter name must be supplied.
#'
#' Constraints: A maximum of 20 parameters can be reset in a single
#' request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ParameterGroupName = "string",
#' ParameterGroupStatus = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$reset_cluster_parameter_group(
#' ParameterGroupName = "string",
#' ResetAllParameters = TRUE|FALSE,
#' Parameters = list(
#' list(
#' ParameterName = "string",
#' ParameterValue = "string",
#' Description = "string",
#' Source = "string",
#' DataType = "string",
#' AllowedValues = "string",
#' ApplyType = "static"|"dynamic",
#' IsModifiable = TRUE|FALSE,
#' MinimumEngineVersion = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_reset_cluster_parameter_group
redshift_reset_cluster_parameter_group <- function(ParameterGroupName, ResetAllParameters = NULL, Parameters = NULL) {
  # Describe the ResetClusterParameterGroup call: plain POST against the
  # service root, no pagination.
  operation <- new_operation(
    name = "ResetClusterParameterGroup",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize arguments into the operation's input shape; optional arguments
  # default to NULL and are omitted from the wire format when unset.
  request_body <- .redshift$reset_cluster_parameter_group_input(
    ParameterGroupName = ParameterGroupName,
    ResetAllParameters = ResetAllParameters,
    Parameters = Parameters
  )
  response_shape <- .redshift$reset_cluster_parameter_group_output()
  # Build a service client from the caller's current configuration.
  service <- .redshift$service(get_config())
  req <- new_request(service, operation, request_body, response_shape)
  send_request(req)
}
.redshift$operations$reset_cluster_parameter_group <- redshift_reset_cluster_parameter_group
#' Changes the size of the cluster
#'
#' @description
#' Changes the size of the cluster. You can change the cluster's type, or
#' change the number or type of nodes. The default behavior is to use the
#' elastic resize method. With an elastic resize, your cluster is available
#' for read and write operations more quickly than with the classic resize
#' method.
#'
#' Elastic resize operations have the following restrictions:
#'
#' - You can only resize clusters of the following types:
#'
#' - dc1.large (if your cluster is in a VPC)
#'
#' - dc1.8xlarge (if your cluster is in a VPC)
#'
#' - dc2.large
#'
#' - dc2.8xlarge
#'
#' - ds2.xlarge
#'
#' - ds2.8xlarge
#'
#' - ra3.xlplus
#'
#' - ra3.4xlarge
#'
#' - ra3.16xlarge
#'
#' - The type of nodes that you add must match the node type for the
#' cluster.
#'
#' @usage
#' redshift_resize_cluster(ClusterIdentifier, ClusterType, NodeType,
#' NumberOfNodes, Classic)
#'
#' @param ClusterIdentifier [required] The unique identifier for the cluster to resize.
#' @param ClusterType The new cluster type for the specified cluster.
#' @param NodeType The new node type for the nodes you are adding. If not specified, the
#' cluster's current node type is used.
#' @param NumberOfNodes The new number of nodes for the cluster. If not specified, the cluster's
#' current number of nodes is used.
#' @param Classic A boolean value indicating whether the resize operation is using the
#' classic resize process. If you don't provide this parameter or set the
#' value to `false`, the resize type is elastic.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$resize_cluster(
#' ClusterIdentifier = "string",
#' ClusterType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' Classic = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_resize_cluster
redshift_resize_cluster <- function(ClusterIdentifier, ClusterType = NULL, NodeType = NULL, NumberOfNodes = NULL, Classic = NULL) {
  # Describe the ResizeCluster call: plain POST against the service root,
  # no pagination.
  operation <- new_operation(
    name = "ResizeCluster",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize arguments into the operation's input shape; NULL optionals
  # are dropped during marshalling.
  request_body <- .redshift$resize_cluster_input(
    ClusterIdentifier = ClusterIdentifier,
    ClusterType = ClusterType,
    NodeType = NodeType,
    NumberOfNodes = NumberOfNodes,
    Classic = Classic
  )
  response_shape <- .redshift$resize_cluster_output()
  # Build a service client from the caller's current configuration.
  service <- .redshift$service(get_config())
  req <- new_request(service, operation, request_body, response_shape)
  send_request(req)
}
.redshift$operations$resize_cluster <- redshift_resize_cluster
#' Creates a new cluster from a snapshot
#'
#' @description
#' Creates a new cluster from a snapshot. By default, Amazon Redshift
#' creates the resulting cluster with the same configuration as the
#' original cluster from which the snapshot was created, except that the
#' new cluster is created with the default cluster security and parameter
#' groups. After Amazon Redshift creates the cluster, you can use the
#' [`modify_cluster`][redshift_modify_cluster] API to associate a different
#' security group and different parameter group with the restored cluster.
#' If you are using a DS node type, you can also choose to change to
#' another DS node type of the same size during restore.
#'
#' If you restore a cluster into a VPC, you must provide a cluster subnet
#' group where you want the cluster restored.
#'
#' For more information about working with snapshots, go to [Amazon
#' Redshift
#' Snapshots](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_restore_from_cluster_snapshot(ClusterIdentifier,
#' SnapshotIdentifier, SnapshotClusterIdentifier, Port, AvailabilityZone,
#' AllowVersionUpgrade, ClusterSubnetGroupName, PubliclyAccessible,
#' OwnerAccount, HsmClientCertificateIdentifier,
#' HsmConfigurationIdentifier, ElasticIp, ClusterParameterGroupName,
#' ClusterSecurityGroups, VpcSecurityGroupIds, PreferredMaintenanceWindow,
#' AutomatedSnapshotRetentionPeriod, ManualSnapshotRetentionPeriod,
#' KmsKeyId, NodeType, EnhancedVpcRouting, AdditionalInfo, IamRoles,
#' MaintenanceTrackName, SnapshotScheduleIdentifier, NumberOfNodes,
#' AvailabilityZoneRelocation)
#'
#' @param ClusterIdentifier [required] The identifier of the cluster that will be created from restoring the
#' snapshot.
#'
#' Constraints:
#'
#' - Must contain from 1 to 63 alphanumeric characters or hyphens.
#'
#' - Alphabetic characters must be lowercase.
#'
#' - First character must be a letter.
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens.
#'
#' - Must be unique for all clusters within an AWS account.
#' @param SnapshotIdentifier [required] The name of the snapshot from which to create the new cluster. This
#' parameter isn't case sensitive.
#'
#' Example: `my-snapshot-id`
#' @param SnapshotClusterIdentifier The name of the cluster the source snapshot was created from. This
#' parameter is required if your IAM user has a policy containing a
#' snapshot resource element that specifies anything other than * for the
#' cluster name.
#' @param Port The port number on which the cluster accepts connections.
#'
#' Default: The same port as the original cluster.
#'
#' Constraints: Must be between `1115` and `65535`.
#' @param AvailabilityZone The Amazon EC2 Availability Zone in which to restore the cluster.
#'
#' Default: A random, system-chosen Availability Zone.
#'
#' Example: `us-east-2a`
#' @param AllowVersionUpgrade If `true`, major version upgrades can be applied during the maintenance
#' window to the Amazon Redshift engine that is running on the cluster.
#'
#' Default: `true`
#' @param ClusterSubnetGroupName The name of the subnet group where you want the cluster restored.
#'
#' A snapshot of cluster in VPC can be restored only in VPC. Therefore, you
#' must provide subnet group name where you want the cluster restored.
#' @param PubliclyAccessible If `true`, the cluster can be accessed from a public network.
#' @param OwnerAccount The AWS customer account used to create or copy the snapshot. Required
#' if you are restoring a snapshot you do not own, optional if you own the
#' snapshot.
#' @param HsmClientCertificateIdentifier Specifies the name of the HSM client certificate the Amazon Redshift
#' cluster uses to retrieve the data encryption keys stored in an HSM.
#' @param HsmConfigurationIdentifier Specifies the name of the HSM configuration that contains the
#' information the Amazon Redshift cluster can use to retrieve and store
#' keys in an HSM.
#' @param ElasticIp The elastic IP (EIP) address for the cluster.
#' @param ClusterParameterGroupName The name of the parameter group to be associated with this cluster.
#'
#' Default: The default Amazon Redshift cluster parameter group. For
#' information about the default parameter group, go to [Working with
#' Amazon Redshift Parameter
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html).
#'
#' Constraints:
#'
#' - Must be 1 to 255 alphanumeric characters or hyphens.
#'
#' - First character must be a letter.
#'
#' - Cannot end with a hyphen or contain two consecutive hyphens.
#' @param ClusterSecurityGroups A list of security groups to be associated with this cluster.
#'
#' Default: The default cluster security group for Amazon Redshift.
#'
#' Cluster security groups only apply to clusters outside of VPCs.
#' @param VpcSecurityGroupIds A list of Virtual Private Cloud (VPC) security groups to be associated
#' with the cluster.
#'
#' Default: The default VPC security group is associated with the cluster.
#'
#' VPC security groups only apply to clusters in VPCs.
#' @param PreferredMaintenanceWindow The weekly time range (in UTC) during which automated cluster
#' maintenance can occur.
#'
#' Format: `ddd:hh24:mi-ddd:hh24:mi`
#'
#' Default: The value selected for the cluster from which the snapshot was
#' taken. For more information about the time blocks for each region, see
#' [Maintenance
#' Windows](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-maintenance-windows)
#' in Amazon Redshift Cluster Management Guide.
#'
#' Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
#'
#' Constraints: Minimum 30-minute window.
#' @param AutomatedSnapshotRetentionPeriod The number of days that automated snapshots are retained. If the value
#' is 0, automated snapshots are disabled. Even if automated snapshots are
#' disabled, you can still create manual snapshots when you want with
#' [`create_cluster_snapshot`][redshift_create_cluster_snapshot].
#'
#' Default: The value selected for the cluster from which the snapshot was
#' taken.
#'
#' Constraints: Must be a value from 0 to 35.
#' @param ManualSnapshotRetentionPeriod The default number of days to retain a manual snapshot. If the value is
#' -1, the snapshot is retained indefinitely. This setting doesn't change
#' the retention period of existing snapshots.
#'
#' The value must be either -1 or an integer between 1 and 3,653.
#' @param KmsKeyId The AWS Key Management Service (KMS) key ID of the encryption key that
#' you want to use to encrypt data in the cluster that you restore from a
#' shared snapshot.
#' @param NodeType The node type that the restored cluster will be provisioned with.
#'
#' Default: The node type of the cluster from which the snapshot was taken.
#' You can modify this if you are using any DS node type. In that case, you
#' can choose to restore into another DS node type of the same size. For
#' example, you can restore ds1.8xlarge into ds2.8xlarge, or ds1.xlarge
#' into ds2.xlarge. If you have a DC instance type, you must restore into
#' that same instance type and size. In other words, you can only restore a
#' dc1.large instance type into another dc1.large instance type or
#' dc2.large instance type. You can't restore dc1.8xlarge to dc2.8xlarge.
#' First restore to a dc1.8xlarge cluster, then resize to a dc2.8xlarge
#' cluster. For more information about node types, see [About Clusters and
#' Nodes](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-about-clusters-and-nodes)
#' in the *Amazon Redshift Cluster Management Guide*.
#' @param EnhancedVpcRouting An option that specifies whether to create the cluster with enhanced VPC
#' routing enabled. To create a cluster that uses enhanced VPC routing, the
#' cluster must be in a VPC. For more information, see [Enhanced VPC
#' Routing](https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html)
#' in the Amazon Redshift Cluster Management Guide.
#'
#' If this option is `true`, enhanced VPC routing is enabled.
#'
#' Default: false
#' @param AdditionalInfo Reserved.
#' @param IamRoles A list of AWS Identity and Access Management (IAM) roles that can be
#' used by the cluster to access other AWS services. You must supply the
#' IAM roles in their Amazon Resource Name (ARN) format. You can supply up
#' to 10 IAM roles in a single request.
#'
#' A cluster can have up to 10 IAM roles associated at any time.
#' @param MaintenanceTrackName The name of the maintenance track for the restored cluster. When you
#' take a snapshot, the snapshot inherits the `MaintenanceTrack` value from
#' the cluster. The snapshot might be on a different track than the cluster
#' that was the source for the snapshot. For example, suppose that you take
#' a snapshot of a cluster that is on the current track and then change the
#' cluster to be on the trailing track. In this case, the snapshot and the
#' source cluster are on different tracks.
#' @param SnapshotScheduleIdentifier A unique identifier for the snapshot schedule.
#' @param NumberOfNodes The number of nodes specified when provisioning the restored cluster.
#' @param AvailabilityZoneRelocation The option to enable relocation for an Amazon Redshift cluster between
#' Availability Zones after the cluster is restored.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$restore_from_cluster_snapshot(
#' ClusterIdentifier = "string",
#' SnapshotIdentifier = "string",
#' SnapshotClusterIdentifier = "string",
#' Port = 123,
#' AvailabilityZone = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' ClusterSubnetGroupName = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' OwnerAccount = "string",
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' ElasticIp = "string",
#' ClusterParameterGroupName = "string",
#' ClusterSecurityGroups = list(
#' "string"
#' ),
#' VpcSecurityGroupIds = list(
#' "string"
#' ),
#' PreferredMaintenanceWindow = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' KmsKeyId = "string",
#' NodeType = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' AdditionalInfo = "string",
#' IamRoles = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' SnapshotScheduleIdentifier = "string",
#' NumberOfNodes = 123,
#' AvailabilityZoneRelocation = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_restore_from_cluster_snapshot
redshift_restore_from_cluster_snapshot <- function(ClusterIdentifier, SnapshotIdentifier, SnapshotClusterIdentifier = NULL, Port = NULL, AvailabilityZone = NULL, AllowVersionUpgrade = NULL, ClusterSubnetGroupName = NULL, PubliclyAccessible = NULL, OwnerAccount = NULL, HsmClientCertificateIdentifier = NULL, HsmConfigurationIdentifier = NULL, ElasticIp = NULL, ClusterParameterGroupName = NULL, ClusterSecurityGroups = NULL, VpcSecurityGroupIds = NULL, PreferredMaintenanceWindow = NULL, AutomatedSnapshotRetentionPeriod = NULL, ManualSnapshotRetentionPeriod = NULL, KmsKeyId = NULL, NodeType = NULL, EnhancedVpcRouting = NULL, AdditionalInfo = NULL, IamRoles = NULL, MaintenanceTrackName = NULL, SnapshotScheduleIdentifier = NULL, NumberOfNodes = NULL, AvailabilityZoneRelocation = NULL) {
  # Describe the RestoreFromClusterSnapshot call: plain POST against the
  # service root, no pagination.
  operation <- new_operation(
    name = "RestoreFromClusterSnapshot",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize all arguments into the operation's input shape. Only
  # ClusterIdentifier and SnapshotIdentifier are required; every other
  # argument defaults to NULL and is omitted from the request when unset.
  request_body <- .redshift$restore_from_cluster_snapshot_input(
    ClusterIdentifier = ClusterIdentifier,
    SnapshotIdentifier = SnapshotIdentifier,
    SnapshotClusterIdentifier = SnapshotClusterIdentifier,
    Port = Port,
    AvailabilityZone = AvailabilityZone,
    AllowVersionUpgrade = AllowVersionUpgrade,
    ClusterSubnetGroupName = ClusterSubnetGroupName,
    PubliclyAccessible = PubliclyAccessible,
    OwnerAccount = OwnerAccount,
    HsmClientCertificateIdentifier = HsmClientCertificateIdentifier,
    HsmConfigurationIdentifier = HsmConfigurationIdentifier,
    ElasticIp = ElasticIp,
    ClusterParameterGroupName = ClusterParameterGroupName,
    ClusterSecurityGroups = ClusterSecurityGroups,
    VpcSecurityGroupIds = VpcSecurityGroupIds,
    PreferredMaintenanceWindow = PreferredMaintenanceWindow,
    AutomatedSnapshotRetentionPeriod = AutomatedSnapshotRetentionPeriod,
    ManualSnapshotRetentionPeriod = ManualSnapshotRetentionPeriod,
    KmsKeyId = KmsKeyId,
    NodeType = NodeType,
    EnhancedVpcRouting = EnhancedVpcRouting,
    AdditionalInfo = AdditionalInfo,
    IamRoles = IamRoles,
    MaintenanceTrackName = MaintenanceTrackName,
    SnapshotScheduleIdentifier = SnapshotScheduleIdentifier,
    NumberOfNodes = NumberOfNodes,
    AvailabilityZoneRelocation = AvailabilityZoneRelocation
  )
  response_shape <- .redshift$restore_from_cluster_snapshot_output()
  # Build a service client from the caller's current configuration.
  service <- .redshift$service(get_config())
  req <- new_request(service, operation, request_body, response_shape)
  send_request(req)
}
.redshift$operations$restore_from_cluster_snapshot <- redshift_restore_from_cluster_snapshot
#' Creates a new table from a table in an Amazon Redshift cluster snapshot
#'
#' @description
#' Creates a new table from a table in an Amazon Redshift cluster snapshot.
#' You must create the new table within the Amazon Redshift cluster that
#' the snapshot was taken from.
#'
#' You cannot use
#' [`restore_table_from_cluster_snapshot`][redshift_restore_table_from_cluster_snapshot]
#' to restore a table with the same name as an existing table in an Amazon
#' Redshift cluster. That is, you cannot overwrite an existing table in a
#' cluster with a restored table. If you want to replace your original
#' table with a new, restored table, then rename or drop your original
#' table before you call
#' [`restore_table_from_cluster_snapshot`][redshift_restore_table_from_cluster_snapshot].
#' When you have renamed your original table, then you can pass the
#' original name of the table as the `NewTableName` parameter value in the
#' call to
#' [`restore_table_from_cluster_snapshot`][redshift_restore_table_from_cluster_snapshot].
#' This way, you can replace the original table with the table created from
#' the snapshot.
#'
#' @usage
#' redshift_restore_table_from_cluster_snapshot(ClusterIdentifier,
#' SnapshotIdentifier, SourceDatabaseName, SourceSchemaName,
#' SourceTableName, TargetDatabaseName, TargetSchemaName, NewTableName)
#'
#' @param ClusterIdentifier [required] The identifier of the Amazon Redshift cluster to restore the table to.
#' @param SnapshotIdentifier [required] The identifier of the snapshot to restore the table from. This snapshot
#' must have been created from the Amazon Redshift cluster specified by the
#' `ClusterIdentifier` parameter.
#' @param SourceDatabaseName [required] The name of the source database that contains the table to restore from.
#' @param SourceSchemaName The name of the source schema that contains the table to restore from.
#' If you do not specify a `SourceSchemaName` value, the default is
#' `public`.
#' @param SourceTableName [required] The name of the source table to restore from.
#' @param TargetDatabaseName The name of the database to restore the table to.
#' @param TargetSchemaName The name of the schema to restore the table to.
#' @param NewTableName [required] The name of the table to create as a result of the current request.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' TableRestoreStatus = list(
#' TableRestoreRequestId = "string",
#' Status = "PENDING"|"IN_PROGRESS"|"SUCCEEDED"|"FAILED"|"CANCELED",
#' Message = "string",
#' RequestTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ProgressInMegaBytes = 123,
#' TotalDataInMegaBytes = 123,
#' ClusterIdentifier = "string",
#' SnapshotIdentifier = "string",
#' SourceDatabaseName = "string",
#' SourceSchemaName = "string",
#' SourceTableName = "string",
#' TargetDatabaseName = "string",
#' TargetSchemaName = "string",
#' NewTableName = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$restore_table_from_cluster_snapshot(
#' ClusterIdentifier = "string",
#' SnapshotIdentifier = "string",
#' SourceDatabaseName = "string",
#' SourceSchemaName = "string",
#' SourceTableName = "string",
#' TargetDatabaseName = "string",
#' TargetSchemaName = "string",
#' NewTableName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_restore_table_from_cluster_snapshot
redshift_restore_table_from_cluster_snapshot <- function(ClusterIdentifier, SnapshotIdentifier, SourceDatabaseName, SourceSchemaName = NULL, SourceTableName, TargetDatabaseName = NULL, TargetSchemaName = NULL, NewTableName) {
  # Describe the RestoreTableFromClusterSnapshot call: plain POST against
  # the service root, no pagination.
  operation <- new_operation(
    name = "RestoreTableFromClusterSnapshot",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize arguments into the operation's input shape. Note the required
  # arguments (SourceTableName, NewTableName) are interleaved with optional
  # ones to mirror the AWS API parameter order; NULL optionals are omitted.
  request_body <- .redshift$restore_table_from_cluster_snapshot_input(
    ClusterIdentifier = ClusterIdentifier,
    SnapshotIdentifier = SnapshotIdentifier,
    SourceDatabaseName = SourceDatabaseName,
    SourceSchemaName = SourceSchemaName,
    SourceTableName = SourceTableName,
    TargetDatabaseName = TargetDatabaseName,
    TargetSchemaName = TargetSchemaName,
    NewTableName = NewTableName
  )
  response_shape <- .redshift$restore_table_from_cluster_snapshot_output()
  # Build a service client from the caller's current configuration.
  service <- .redshift$service(get_config())
  req <- new_request(service, operation, request_body, response_shape)
  send_request(req)
}
.redshift$operations$restore_table_from_cluster_snapshot <- redshift_restore_table_from_cluster_snapshot
#' Resumes a paused cluster
#'
#' @description
#' Resumes a paused cluster.
#'
#' @usage
#' redshift_resume_cluster(ClusterIdentifier)
#'
#' @param ClusterIdentifier [required] The identifier of the cluster to be resumed.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$resume_cluster(
#' ClusterIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_resume_cluster
redshift_resume_cluster <- function(ClusterIdentifier) {
  # Describe the ResumeCluster API call (Redshift uses POST to "/" for all ops).
  operation <- new_operation(
    name = "ResumeCluster",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's argument into the request shape and prepare the
  # expected response shape.
  request_body <- .redshift$resume_cluster_input(ClusterIdentifier = ClusterIdentifier)
  response_shape <- .redshift$resume_cluster_output()
  # Build a service client from the active configuration and dispatch.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.redshift$operations$resume_cluster <- redshift_resume_cluster
#' Revokes an ingress rule in an Amazon Redshift security group for a
#' previously authorized IP range or Amazon EC2 security group
#'
#' @description
#' Revokes an ingress rule in an Amazon Redshift security group for a
#' previously authorized IP range or Amazon EC2 security group. To add an
#' ingress rule, see
#' [`authorize_cluster_security_group_ingress`][redshift_authorize_cluster_security_group_ingress].
#' For information about managing security groups, go to [Amazon Redshift
#' Cluster Security
#' Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_revoke_cluster_security_group_ingress(ClusterSecurityGroupName,
#' CIDRIP, EC2SecurityGroupName, EC2SecurityGroupOwnerId)
#'
#' @param ClusterSecurityGroupName [required] The name of the security Group from which to revoke the ingress rule.
#' @param CIDRIP The IP range for which to revoke access. This range must be a valid
#' Classless Inter-Domain Routing (CIDR) block of IP addresses. If `CIDRIP`
#' is specified, `EC2SecurityGroupName` and `EC2SecurityGroupOwnerId`
#' cannot be provided.
#' @param EC2SecurityGroupName The name of the EC2 Security Group whose access is to be revoked. If
#' `EC2SecurityGroupName` is specified, `EC2SecurityGroupOwnerId` must also
#' be provided and `CIDRIP` cannot be provided.
#' @param EC2SecurityGroupOwnerId The AWS account number of the owner of the security group specified in
#' the `EC2SecurityGroupName` parameter. The AWS access key ID is not an
#' acceptable value. If `EC2SecurityGroupOwnerId` is specified,
#' `EC2SecurityGroupName` must also be provided. and `CIDRIP` cannot be
#' provided.
#'
#' Example: `111122223333`
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ClusterSecurityGroup = list(
#' ClusterSecurityGroupName = "string",
#' Description = "string",
#' EC2SecurityGroups = list(
#' list(
#' Status = "string",
#' EC2SecurityGroupName = "string",
#' EC2SecurityGroupOwnerId = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ),
#' IPRanges = list(
#' list(
#' Status = "string",
#' CIDRIP = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ),
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$revoke_cluster_security_group_ingress(
#' ClusterSecurityGroupName = "string",
#' CIDRIP = "string",
#' EC2SecurityGroupName = "string",
#' EC2SecurityGroupOwnerId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_revoke_cluster_security_group_ingress
redshift_revoke_cluster_security_group_ingress <- function(ClusterSecurityGroupName, CIDRIP = NULL, EC2SecurityGroupName = NULL, EC2SecurityGroupOwnerId = NULL) {
  # Describe the RevokeClusterSecurityGroupIngress API call.
  operation <- new_operation(
    name = "RevokeClusterSecurityGroupIngress",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments into the request shape; optional arguments
  # default to NULL and are omitted from the serialized request.
  request_body <- .redshift$revoke_cluster_security_group_ingress_input(
    ClusterSecurityGroupName = ClusterSecurityGroupName,
    CIDRIP = CIDRIP,
    EC2SecurityGroupName = EC2SecurityGroupName,
    EC2SecurityGroupOwnerId = EC2SecurityGroupOwnerId
  )
  response_shape <- .redshift$revoke_cluster_security_group_ingress_output()
  # Build a service client from the active configuration and dispatch.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.redshift$operations$revoke_cluster_security_group_ingress <- redshift_revoke_cluster_security_group_ingress
#' Removes the ability of the specified AWS customer account to restore the
#' specified snapshot
#'
#' @description
#' Removes the ability of the specified AWS customer account to restore the
#' specified snapshot. If the account is currently restoring the snapshot,
#' the restore will run to completion.
#'
#' For more information about working with snapshots, go to [Amazon
#' Redshift
#' Snapshots](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html)
#' in the *Amazon Redshift Cluster Management Guide*.
#'
#' @usage
#' redshift_revoke_snapshot_access(SnapshotIdentifier,
#' SnapshotClusterIdentifier, AccountWithRestoreAccess)
#'
#' @param SnapshotIdentifier [required] The identifier of the snapshot that the account can no longer access.
#' @param SnapshotClusterIdentifier The identifier of the cluster the snapshot was created from. This
#' parameter is required if your IAM user has a policy containing a
#' snapshot resource element that specifies anything other than * for the
#' cluster name.
#' @param AccountWithRestoreAccess [required] The identifier of the AWS customer account that can no longer restore
#' the specified snapshot.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Snapshot = list(
#' SnapshotIdentifier = "string",
#' ClusterIdentifier = "string",
#' SnapshotCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' Status = "string",
#' Port = 123,
#' AvailabilityZone = "string",
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' MasterUsername = "string",
#' ClusterVersion = "string",
#' EngineFullVersion = "string",
#' SnapshotType = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' DBName = "string",
#' VpcId = "string",
#' Encrypted = TRUE|FALSE,
#' KmsKeyId = "string",
#' EncryptedWithHSM = TRUE|FALSE,
#' AccountsWithRestoreAccess = list(
#' list(
#' AccountId = "string",
#' AccountAlias = "string"
#' )
#' ),
#' OwnerAccount = "string",
#' TotalBackupSizeInMegaBytes = 123.0,
#' ActualIncrementalBackupSizeInMegaBytes = 123.0,
#' BackupProgressInMegaBytes = 123.0,
#' CurrentBackupRateInMegaBytesPerSecond = 123.0,
#' EstimatedSecondsToCompletion = 123,
#' ElapsedTimeInSeconds = 123,
#' SourceRegion = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' RestorableNodeTypes = list(
#' "string"
#' ),
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' ManualSnapshotRetentionPeriod = 123,
#' ManualSnapshotRemainingDays = 123,
#' SnapshotRetentionStartTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$revoke_snapshot_access(
#' SnapshotIdentifier = "string",
#' SnapshotClusterIdentifier = "string",
#' AccountWithRestoreAccess = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_revoke_snapshot_access
redshift_revoke_snapshot_access <- function(SnapshotIdentifier, SnapshotClusterIdentifier = NULL, AccountWithRestoreAccess) {
  # Describe the RevokeSnapshotAccess API call.
  operation <- new_operation(
    name = "RevokeSnapshotAccess",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments into the request shape; the optional
  # cluster identifier is only needed when an IAM policy pins the snapshot
  # resource to a specific cluster.
  request_body <- .redshift$revoke_snapshot_access_input(
    SnapshotIdentifier = SnapshotIdentifier,
    SnapshotClusterIdentifier = SnapshotClusterIdentifier,
    AccountWithRestoreAccess = AccountWithRestoreAccess
  )
  response_shape <- .redshift$revoke_snapshot_access_output()
  # Build a service client from the active configuration and dispatch.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.redshift$operations$revoke_snapshot_access <- redshift_revoke_snapshot_access
#' Rotates the encryption keys for a cluster
#'
#' @description
#' Rotates the encryption keys for a cluster.
#'
#' @usage
#' redshift_rotate_encryption_key(ClusterIdentifier)
#'
#' @param ClusterIdentifier [required] The unique identifier of the cluster that you want to rotate the
#' encryption keys for.
#'
#' Constraints: Must be the name of valid cluster that has encryption
#' enabled.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Cluster = list(
#' ClusterIdentifier = "string",
#' NodeType = "string",
#' ClusterStatus = "string",
#' ClusterAvailabilityStatus = "string",
#' ModifyStatus = "string",
#' MasterUsername = "string",
#' DBName = "string",
#' Endpoint = list(
#' Address = "string",
#' Port = 123,
#' VpcEndpoints = list(
#' list(
#' VpcEndpointId = "string"
#' )
#' )
#' ),
#' ClusterCreateTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' AutomatedSnapshotRetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' ClusterSecurityGroups = list(
#' list(
#' ClusterSecurityGroupName = "string",
#' Status = "string"
#' )
#' ),
#' VpcSecurityGroups = list(
#' list(
#' VpcSecurityGroupId = "string",
#' Status = "string"
#' )
#' ),
#' ClusterParameterGroups = list(
#' list(
#' ParameterGroupName = "string",
#' ParameterApplyStatus = "string",
#' ClusterParameterStatusList = list(
#' list(
#' ParameterName = "string",
#' ParameterApplyStatus = "string",
#' ParameterApplyErrorDescription = "string"
#' )
#' )
#' )
#' ),
#' ClusterSubnetGroupName = "string",
#' VpcId = "string",
#' AvailabilityZone = "string",
#' PreferredMaintenanceWindow = "string",
#' PendingModifiedValues = list(
#' MasterUserPassword = "string",
#' NodeType = "string",
#' NumberOfNodes = 123,
#' ClusterType = "string",
#' ClusterVersion = "string",
#' AutomatedSnapshotRetentionPeriod = 123,
#' ClusterIdentifier = "string",
#' PubliclyAccessible = TRUE|FALSE,
#' EnhancedVpcRouting = TRUE|FALSE,
#' MaintenanceTrackName = "string",
#' EncryptionType = "string"
#' ),
#' ClusterVersion = "string",
#' AllowVersionUpgrade = TRUE|FALSE,
#' NumberOfNodes = 123,
#' PubliclyAccessible = TRUE|FALSE,
#' Encrypted = TRUE|FALSE,
#' RestoreStatus = list(
#' Status = "string",
#' CurrentRestoreRateInMegaBytesPerSecond = 123.0,
#' SnapshotSizeInMegaBytes = 123,
#' ProgressInMegaBytes = 123,
#' ElapsedTimeInSeconds = 123,
#' EstimatedTimeToCompletionInSeconds = 123
#' ),
#' DataTransferProgress = list(
#' Status = "string",
#' CurrentRateInMegaBytesPerSecond = 123.0,
#' TotalDataInMegaBytes = 123,
#' DataTransferredInMegaBytes = 123,
#' EstimatedTimeToCompletionInSeconds = 123,
#' ElapsedTimeInSeconds = 123
#' ),
#' HsmStatus = list(
#' HsmClientCertificateIdentifier = "string",
#' HsmConfigurationIdentifier = "string",
#' Status = "string"
#' ),
#' ClusterSnapshotCopyStatus = list(
#' DestinationRegion = "string",
#' RetentionPeriod = 123,
#' ManualSnapshotRetentionPeriod = 123,
#' SnapshotCopyGrantName = "string"
#' ),
#' ClusterPublicKey = "string",
#' ClusterNodes = list(
#' list(
#' NodeRole = "string",
#' PrivateIPAddress = "string",
#' PublicIPAddress = "string"
#' )
#' ),
#' ElasticIpStatus = list(
#' ElasticIp = "string",
#' Status = "string"
#' ),
#' ClusterRevisionNumber = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' EnhancedVpcRouting = TRUE|FALSE,
#' IamRoles = list(
#' list(
#' IamRoleArn = "string",
#' ApplyStatus = "string"
#' )
#' ),
#' PendingActions = list(
#' "string"
#' ),
#' MaintenanceTrackName = "string",
#' ElasticResizeNumberOfNodeOptions = "string",
#' DeferredMaintenanceWindows = list(
#' list(
#' DeferMaintenanceIdentifier = "string",
#' DeferMaintenanceStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' DeferMaintenanceEndTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' SnapshotScheduleIdentifier = "string",
#' SnapshotScheduleState = "MODIFYING"|"ACTIVE"|"FAILED",
#' ExpectedNextSnapshotScheduleTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ExpectedNextSnapshotScheduleTimeStatus = "string",
#' NextMaintenanceWindowStartTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ResizeInfo = list(
#' ResizeType = "string",
#' AllowCancelResize = TRUE|FALSE
#' ),
#' AvailabilityZoneRelocationStatus = "string",
#' ClusterNamespaceArn = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$rotate_encryption_key(
#' ClusterIdentifier = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname redshift_rotate_encryption_key
redshift_rotate_encryption_key <- function(ClusterIdentifier) {
  # Describe the RotateEncryptionKey API call.
  operation <- new_operation(
    name = "RotateEncryptionKey",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's argument into the request shape and prepare the
  # expected response shape.
  request_body <- .redshift$rotate_encryption_key_input(ClusterIdentifier = ClusterIdentifier)
  response_shape <- .redshift$rotate_encryption_key_output()
  # Build a service client from the active configuration and dispatch.
  client <- .redshift$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.redshift$operations$rotate_encryption_key <- redshift_rotate_encryption_key
|
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ---- echo=FALSE, results="hide", warning=FALSE, message=FALSE----------------
library(did)
# Source the currently version of the did package (based on our Dropbox)
# fldr <- here::here("R/")
# sapply(paste0(fldr,list.files(fldr)), source)
# Source simulation designs
source(here::here("vignettes/setup_sims.R"))
## ----echo=FALSE---------------------------------------------------------------
time.periods <- 4
reset.sim()
bett <- betu <- rep(0, time.periods)
te <- 0
set.seed(1814)
## -----------------------------------------------------------------------------
# generate dataset with 4 time periods
time.periods <- 4
# generate dynamic effects
te.e <- time.periods:1
# generate data set with these parameters
# (main thing: it generates a dataset that satisfies
# parallel trends in all periods...including pre-treatment)
data <- build_sim_dataset()
head(data)
## ---- fig.width=8,fig.height=5, fig.align='center', out.width="90%", dpi = 200----
#-----------------------------------------------------------------------------
# modify the dataset a bit so that we can run an event study
#-----------------------------------------------------------------------------
# generate leads and lags of the treatment
Dtl <- sapply(-(time.periods-1):(time.periods-2), function(l) {
dtl <- 1*( (data$period == data$G + l) & (data$G > 0) )
dtl
})
Dtl <- as.data.frame(Dtl)
cnames1 <- paste0("Dtmin",(time.periods-1):1)
colnames(Dtl) <- c(cnames1, paste0("Dt",0:(time.periods-2)))
data <- cbind.data.frame(data, Dtl)
row.names(data) <- NULL
head(data)
#-----------------------------------------------------------------------------
# run the event study regression
#-----------------------------------------------------------------------------
# load plm package
library(plm)
# run event study regression
# normalize effect to be 0 in pre-treatment period
es <- plm(Y ~ Dtmin3 + Dtmin2 + Dt0 + Dt1 + Dt2,
data=data, model="within", effect="twoways",
index=c("id","period"))
summary(es)
#-----------------------------------------------------------------------------
# make an event study plot
#-----------------------------------------------------------------------------
# some housekeeping for making the plot
# add 0 at event time -1
coefs1 <- coef(es)
ses1 <- sqrt(diag(summary(es)$vcov))
idx.pre <- 1:(time.periods-2)
idx.post <- (time.periods-1):length(coefs1)
coefs <- c(coefs1[idx.pre], 0, coefs1[idx.post])
ses <- c(ses1[idx.pre], 0, ses1[idx.post])
exposure <- -(time.periods-1):(time.periods-2)
cmat <- data.frame(coefs=coefs, ses=ses, exposure=exposure)
library(ggplot2)
ggplot(data=cmat, mapping=aes(y=coefs, x=exposure)) +
geom_line(linetype="dashed") +
geom_point() +
geom_errorbar(aes(ymin=(coefs-1.96*ses), ymax=(coefs+1.96*ses)), width=0.2) +
ylim(c(-2,5)) +
theme_bw()
## ---- fig.width=8,fig.height=10, fig.align='center', out.width="90%", dpi = 200----
# estimate group-group time average treatment effects
did_att_gt <- att_gt(yname="Y",
tname="period",
idname="id",
gname="G",
data=data,
bstrap=FALSE,
cband=FALSE)
summary(did_att_gt)
# plot them
ggdid(did_att_gt)
## ---- fig.width=8,fig.height=5, fig.align='center', out.width="90%", dpi = 200----
# aggregate them into event study plot
did_es <- aggte(did_att_gt, type="dynamic")
# plot the event study
ggdid(did_es)
## ---- echo=FALSE--------------------------------------------------------------
reset.sim()
bett <- betu <- rep(0, time.periods)
te <- 0
set.seed(1814)
## -----------------------------------------------------------------------------
# generate dataset with 4 time periods
time.periods <- 4
# generate dynamic effects
te.e <- time.periods:1
# generate selective treatment timing
# (*** this is what is different here ***)
te.bet.ind <- time.periods:1 / (time.periods/2)
# generate data set with these parameters
# (main thing: it generates a dataset that satisfies
# parallel trends in all periods...including pre-treatment)
data <- build_sim_dataset()
## -----------------------------------------------------------------------------
# run through same code as in earlier example...omitted
## ---- echo=FALSE--------------------------------------------------------------
#-----------------------------------------------------------------------------
# modify the dataset a bit so that we can run an event study
#-----------------------------------------------------------------------------
# generate leads and lags of the treatment
Dtl <- sapply(-(time.periods-1):(time.periods-2), function(l) {
dtl <- 1*( (data$period == data$G + l) & (data$G > 0) )
dtl
})
Dtl <- as.data.frame(Dtl)
cnames1 <- paste0("Dtmin",(time.periods-1):1)
colnames(Dtl) <- c(cnames1, paste0("Dt",0:(time.periods-2)))
data <- cbind.data.frame(data, Dtl)
row.names(data) <- NULL
#-----------------------------------------------------------------------------
# run the event study regression
#-----------------------------------------------------------------------------
# load plm package
library(plm)
## -----------------------------------------------------------------------------
# run event study regression
# normalize effect to be 0 in pre-treatment period
es <- plm(Y ~ Dtmin3 + Dtmin2 + Dt0 + Dt1 + Dt2,
data=data, model="within", effect="twoways",
index=c("id","period"))
summary(es)
## ---- echo=FALSE--------------------------------------------------------------
#-----------------------------------------------------------------------------
# make an event study plot
#-----------------------------------------------------------------------------
# some housekeeping for making the plot
# add 0 at event time -1
coefs1 <- coef(es)
ses1 <- sqrt(diag(summary(es)$vcov))
idx.pre <- 1:(time.periods-2)
idx.post <- (time.periods-1):length(coefs1)
coefs <- c(coefs1[idx.pre], 0, coefs1[idx.post])
ses <- c(ses1[idx.pre], 0, ses1[idx.post])
exposure <- -(time.periods-1):(time.periods-2)
cmat <- data.frame(coefs=coefs, ses=ses, exposure=exposure)
## ---- fig.width=8,fig.height=5, fig.align='center', out.width="90%", dpi = 200----
# run through same code as before...omitted
# new event study plot
ggplot(data=cmat, mapping=aes(y=coefs, x=exposure)) +
geom_line(linetype="dashed") +
geom_point() +
geom_errorbar(aes(ymin=(coefs-1.96*ses), ymax=(coefs+1.96*ses)), width=0.2) +
ylim(c(-2,5)) +
theme_bw()
## ---- fig.width=8,fig.height=10, fig.align='center', out.width="90%", dpi = 200----
# estimate group-time average treatment effects
did.att.gt <- att_gt(yname = "Y",
                     tname = "period",
                     idname = "id",  # fixed: argument was misspelled "idnam"
                     gname = "G",
                     data = data)
summary(did.att.gt)
# plot them
ggdid(did.att.gt)
## ---- fig.width=8,fig.height=5, fig.align='center', out.width="90%", dpi = 200----
# aggregate them into event study plot
did.es <- aggte(did.att.gt, type="dynamic")
# plot the event study
ggdid(did.es)
## ----eval=FALSE---------------------------------------------------------------
# # not run (this code can be substantially slower)
# reset.sim()
# set.seed(1814)
# nt <- 1000
# nu <- 1000
# cdp <- conditional_did_pretest("Y", "period", "id", "G", xformla=~X, data=data)
# cdp
| /inst/doc/pre-testing.R | no_license | cran/did | R | false | false | 7,532 | r | ## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ---- echo=FALSE, results="hide", warning=FALSE, message=FALSE----------------
library(did)
# Source the currently version of the did package (based on our Dropbox)
# fldr <- here::here("R/")
# sapply(paste0(fldr,list.files(fldr)), source)
# Source simulation designs
source(here::here("vignettes/setup_sims.R"))
## ----echo=FALSE---------------------------------------------------------------
time.periods <- 4
reset.sim()
bett <- betu <- rep(0, time.periods)
te <- 0
set.seed(1814)
## -----------------------------------------------------------------------------
# generate dataset with 4 time periods
time.periods <- 4
# generate dynamic effects
te.e <- time.periods:1
# generate data set with these parameters
# (main thing: it generates a dataset that satisfies
# parallel trends in all periods...including pre-treatment)
data <- build_sim_dataset()
head(data)
## ---- fig.width=8,fig.height=5, fig.align='center', out.width="90%", dpi = 200----
#-----------------------------------------------------------------------------
# modify the dataset a bit so that we can run an event study
#-----------------------------------------------------------------------------
# generate leads and lags of the treatment
Dtl <- sapply(-(time.periods-1):(time.periods-2), function(l) {
dtl <- 1*( (data$period == data$G + l) & (data$G > 0) )
dtl
})
Dtl <- as.data.frame(Dtl)
cnames1 <- paste0("Dtmin",(time.periods-1):1)
colnames(Dtl) <- c(cnames1, paste0("Dt",0:(time.periods-2)))
data <- cbind.data.frame(data, Dtl)
row.names(data) <- NULL
head(data)
#-----------------------------------------------------------------------------
# run the event study regression
#-----------------------------------------------------------------------------
# load plm package
library(plm)
# run event study regression
# normalize effect to be 0 in pre-treatment period
es <- plm(Y ~ Dtmin3 + Dtmin2 + Dt0 + Dt1 + Dt2,
data=data, model="within", effect="twoways",
index=c("id","period"))
summary(es)
#-----------------------------------------------------------------------------
# make an event study plot
#-----------------------------------------------------------------------------
# some housekeeping for making the plot
# add 0 at event time -1
coefs1 <- coef(es)
ses1 <- sqrt(diag(summary(es)$vcov))
idx.pre <- 1:(time.periods-2)
idx.post <- (time.periods-1):length(coefs1)
coefs <- c(coefs1[idx.pre], 0, coefs1[idx.post])
ses <- c(ses1[idx.pre], 0, ses1[idx.post])
exposure <- -(time.periods-1):(time.periods-2)
cmat <- data.frame(coefs=coefs, ses=ses, exposure=exposure)
library(ggplot2)
ggplot(data=cmat, mapping=aes(y=coefs, x=exposure)) +
geom_line(linetype="dashed") +
geom_point() +
geom_errorbar(aes(ymin=(coefs-1.96*ses), ymax=(coefs+1.96*ses)), width=0.2) +
ylim(c(-2,5)) +
theme_bw()
## ---- fig.width=8,fig.height=10, fig.align='center', out.width="90%", dpi = 200----
# estimate group-group time average treatment effects
did_att_gt <- att_gt(yname="Y",
tname="period",
idname="id",
gname="G",
data=data,
bstrap=FALSE,
cband=FALSE)
summary(did_att_gt)
# plot them
ggdid(did_att_gt)
## ---- fig.width=8,fig.height=5, fig.align='center', out.width="90%", dpi = 200----
# aggregate them into event study plot
did_es <- aggte(did_att_gt, type="dynamic")
# plot the event study
ggdid(did_es)
## ---- echo=FALSE--------------------------------------------------------------
reset.sim()
bett <- betu <- rep(0, time.periods)
te <- 0
set.seed(1814)
## -----------------------------------------------------------------------------
# generate dataset with 4 time periods
time.periods <- 4
# generate dynamic effects
te.e <- time.periods:1
# generate selective treatment timing
# (*** this is what is different here ***)
te.bet.ind <- time.periods:1 / (time.periods/2)
# generate data set with these parameters
# (main thing: it generates a dataset that satisfies
# parallel trends in all periods...including pre-treatment)
data <- build_sim_dataset()
## -----------------------------------------------------------------------------
# run through same code as in earlier example...omitted
## ---- echo=FALSE--------------------------------------------------------------
#-----------------------------------------------------------------------------
# modify the dataset a bit so that we can run an event study
#-----------------------------------------------------------------------------
# generate leads and lags of the treatment
Dtl <- sapply(-(time.periods-1):(time.periods-2), function(l) {
dtl <- 1*( (data$period == data$G + l) & (data$G > 0) )
dtl
})
Dtl <- as.data.frame(Dtl)
cnames1 <- paste0("Dtmin",(time.periods-1):1)
colnames(Dtl) <- c(cnames1, paste0("Dt",0:(time.periods-2)))
data <- cbind.data.frame(data, Dtl)
row.names(data) <- NULL
#-----------------------------------------------------------------------------
# run the event study regression
#-----------------------------------------------------------------------------
# load plm package
library(plm)
## -----------------------------------------------------------------------------
# run event study regression
# normalize effect to be 0 in pre-treatment period
es <- plm(Y ~ Dtmin3 + Dtmin2 + Dt0 + Dt1 + Dt2,
data=data, model="within", effect="twoways",
index=c("id","period"))
summary(es)
## ---- echo=FALSE--------------------------------------------------------------
#-----------------------------------------------------------------------------
# make an event study plot
#-----------------------------------------------------------------------------
# some housekeeping for making the plot
# add 0 at event time -1
coefs1 <- coef(es)
ses1 <- sqrt(diag(summary(es)$vcov))
idx.pre <- 1:(time.periods-2)
idx.post <- (time.periods-1):length(coefs1)
coefs <- c(coefs1[idx.pre], 0, coefs1[idx.post])
ses <- c(ses1[idx.pre], 0, ses1[idx.post])
exposure <- -(time.periods-1):(time.periods-2)
cmat <- data.frame(coefs=coefs, ses=ses, exposure=exposure)
## ---- fig.width=8,fig.height=5, fig.align='center', out.width="90%", dpi = 200----
# run through same code as before...omitted
# new event study plot
ggplot(data=cmat, mapping=aes(y=coefs, x=exposure)) +
geom_line(linetype="dashed") +
geom_point() +
geom_errorbar(aes(ymin=(coefs-1.96*ses), ymax=(coefs+1.96*ses)), width=0.2) +
ylim(c(-2,5)) +
theme_bw()
## ---- fig.width=8,fig.height=10, fig.align='center', out.width="90%", dpi = 200----
# estimate group-time average treatment effects
did.att.gt <- att_gt(yname = "Y",
                     tname = "period",
                     idname = "id",  # fixed: argument was misspelled "idnam"
                     gname = "G",
                     data = data)
summary(did.att.gt)
# plot them
ggdid(did.att.gt)
## ---- fig.width=8,fig.height=5, fig.align='center', out.width="90%", dpi = 200----
# aggregate them into event study plot
did.es <- aggte(did.att.gt, type="dynamic")
# plot the event study
ggdid(did.es)
## ----eval=FALSE---------------------------------------------------------------
# # not run (this code can be substantially slower)
# reset.sim()
# set.seed(1814)
# nt <- 1000
# nu <- 1000
# cdp <- conditional_did_pretest("Y", "period", "id", "G", xformla=~X, data=data)
# cdp
|
#!/usr/bin/env Rscript
#' Report analysis results to a CSV file
#'
#' Reads every row of the `analysis_result` table from the workshop database
#' and writes it to `csv_file` in CSV format.
#'
#' @export
#' @param workshop_dir Directory of the workshop (used to locate the database).
#' @param csv_file Path of the file that will receive the output (CSV format).
#' @return `NULL`, invisibly. Called for its side effect of writing the file.
report_analysis <- function(workshop_dir, csv_file) {
  db_object <- connect_database(workshop_dir)
  # Release the connection even if fetching or writing fails.
  on.exit(dbDisconnect(db_object), add = TRUE)
  # The query has no parameters, so the previous sprintf() pass-through was
  # unnecessary (and would break on a literal "%" in the SQL).
  sql_string <- "SELECT * FROM `analysis_result`;"
  result_obj <- dbSendQuery(db_object, sql_string)
  result_data <- dbFetch(result_obj)
  dbClearResult(result_obj)
  write.table(result_data, file = csv_file, sep = ",", row.names = FALSE)
  invisible(NULL)
}
| /src/R/report_analysis.R | no_license | howl-anderson/sdmanalysis | R | false | false | 630 | r | #!/usr/bin/env Rscript
#' Report analysis results to a CSV file
#'
#' Reads every row of the `analysis_result` table from the workshop database
#' and writes it to `csv_file` in CSV format.
#'
#' @export
#' @param workshop_dir Directory of the workshop (used to locate the database).
#' @param csv_file Path of the file that will receive the output (CSV format).
#' @return `NULL`, invisibly. Called for its side effect of writing the file.
report_analysis <- function(workshop_dir, csv_file) {
  db_object <- connect_database(workshop_dir)
  # Release the connection even if fetching or writing fails.
  on.exit(dbDisconnect(db_object), add = TRUE)
  # The query has no parameters, so the previous sprintf() pass-through was
  # unnecessary (and would break on a literal "%" in the SQL).
  sql_string <- "SELECT * FROM `analysis_result`;"
  result_obj <- dbSendQuery(db_object, sql_string)
  result_data <- dbFetch(result_obj)
  dbClearResult(result_obj)
  write.table(result_data, file = csv_file, sep = ",", row.names = FALSE)
  invisible(NULL)
}
|
# Visualization
#ggplot2
install.packages("ggplot2")
library(ggplot2)
data(ChickWeight)
head(ChickWeight)
ggplot(ChickWeight, aes(x=Time, y=weight, colour=Diet, group=Chick)) + geom_line()
# geom_point() : scatter plot, geom_smooth(): 배경색상 투명도(alpah), 평균 값 선 굵기(size) 조정
ggplot(ChickWeight, aes(x=Time, y=weight, colour=Diet)) + geom_point(alpha=.3) + geom_smooth(alpha=.2, size=1)
# point graph
h <- ggplot(ChickWeight, aes(x=Time, y=weight, colour=Diet))
h+geom_point(alpha=0.3)
# smooth graph
h <- ggplot(ChickWeight, aes(x=Time, y=weight, colour=Diet))
h + geom_smooth(alpha=0.4, size=3)
# Histogram
# Time=21일 때, 무게 분포도
ggplot(subset(ChickWeight, Time==21), aes(x=weight, colour=Diet)) + geom_density()
h <- ggplot(subset(ChickWeight, Time==21), aes(x=weight, fill=Diet))
# facet_grid(Diet~.) : 가로 / facet_grid(.~Diet) : 세로
h + geom_histogram(colour='black', binwidth=50) + facet_grid(Diet~.)
h + geom_histogram(colour='black', binwidth=50) + facet_grid(.~Diet)
# Point Graph
data(mtcars)
head(mtcars)
help(mtcars)
p <- qplot(wt, mpg, colour = hp, data=mtcars)
p + coord_cartesian(ylim = c(0,40)) # y 축 range
p + scale_colour_continuous(breaks = c(100,300)) # hp range
p + guides(colour = "colourbar")
m <- mtcars[1:10, ]
p%+%m # 10개 데이터 무게별 연비 그래프
# Bar graph
c <- ggplot(mtcars, aes(factor(cyl)))
c + geom_bar()
c + geom_bar(fill="red") # 내부색 바꾸기
c + geom_bar(colour = "red") # line color
c + geom_bar(fill="white", colour="red")
k <- ggplot(mtcars, aes(factor(cyl), fill=factor(vs))) # fill option
k + geom_bar()
# FIXME : !Data 없음!
#m <- ggplot(movies, aes(x=rating))
# m + geom_histogram()
# m + geom_histogram(aes(fill= ..count..))
# Line graph
data(economics)
head(economics)
b <- ggplot(economics, aes(x = date, y=unemploy))
b + geom_line()
b + geom_line(colour = "red")
b + geom_line(colour = 'red', size = 3)
b + geom_line(linetype=2)
b + geom_line(linetype=1)
b + geom_line(linetype=2)
b + geom_line(linetype=3)
b + geom_line(linetype=4)
# 효과주기
df <- data.frame(x =rnorm(5000), y = rnorm(5000))
h <- ggplot(df, aes(x,y))
h + geom_point()
h + geom_point(alpha = 0.5)
h + geom_point(alpha = 1/10)
p <- ggplot(mtcars, aes(wt, mpg))
p + geom_point(size=4)
p + geom_point(aes(colour=factor(cyl)), size=4)
p + geom_point(aes(shape=factor(cyl)), size=4)
# reshape2, plyr: data reshaping (melt) and column-wise helpers (colwise).
# NOTE(review): install.packages() in a script re-installs on every run;
# kept for parity with the original workflow.
install.packages("reshape2")
install.packages("plyr")  # fixed typo: was "pylr", which is not a CRAN package
library(reshape2)
library(plyr)
rescale01 <- function(x) (x-min(x)) / diff(range(x))
ec_scaled <- data.frame(date = economics$date, colwise(rescale01)(economics[, -(1:2)]))
ecm <- melt(ec_scaled, id = "date")
f <- ggplot(ecm, aes(date, value))
f + geom_line(aes(linetype = variable))
data(diamonds)
head(diamonds)
k <- ggplot(diamonds, aes(carat, ..density..)) + geom_histogram(binwidth=0.2)
k + facet_grid(.~cut)
w <- ggplot(diamonds, aes(clarity, fill=cut))
w + geom_bar()
# w + geom_bar(aes(order = desc(cut)))
df <- data.frame(x=1:10, y=1:10)
f <- ggplot(df, aes(x=x, y=y))
f + geom_line(linetype=2)
f+ geom_line(linetype = "dotdash")
p <- ggplot(mtcars, aes(wt, mpg))
p + geom_point(size=4)
p + geom_point(aes(size=qsec)) # qsec별 point 크기 다름
# Ponint + Line(임의)
p + geom_point(size=2.5) + geom_hline(yintercept = 25, size=3.5)
p + geom_point()
p + geom_point(shape=5)
p + geom_point(shape="k", size=3)
p + geom_point(shape=".")
p + geom_point(shape=NA)
# Various shapes
df2 <- data.frame(x = 1:5, y=1:25, z= 1:25)
s <- ggplot(df2, aes(x=x, y=y))
s+ geom_point(aes(shape=z), size=4) + scale_shape_identity()
# point range graph
dmod <- lm(price ~ cut, data=diamonds)
cuts <- data.frame(cut = unique(diamonds$cut),
predict(dmod, data.frame(cut = unique(diamonds$cut)),
se=TRUE)[c("fit", "se.fit")])
se <- ggplot(cuts, aes(x=cut, y = fit, ymin=fit-se.fit,
ymax = fit+se.fit, colour = cut))
se + geom_pointrange()
# 특정 영역 강조
p <- ggplot(mtcars, aes(wt, mpg)) + geom_point()
p + annotate("rect", xmin=2, xmax=3.5, ymin = 2, ymax=25,
fill="black", alpha=.6)
# qplot + smooth
p <- qplot(disp, wt, data=mtcars) + geom_smooth()
p
# limits 원하는 부분만
p + scale_x_continuous(limits = c(325, 500))
# 확대
d <- ggplot(diamonds, aes(carat, price))
d + stat_bin2d(bins=25, colour="grey50")
d + stat_bin2d(bins=25, colour="grey50")+scale_x_continuous(limits=c(0,2))
# boxplot
qplot(cut, price, data=diamonds, geom="boxplot") # 세로
last_plot() + coord_flip() # 가로
qplot(cut, data = diamonds, geom="bar")
h <- qplot(carat, data=diamonds, geom="histogram")
h + coord_flip()
h + coord_flip() + scale_x_reverse()
# Mutiple Axis
# multiple y axis in plot.R
time <- seq(7000, 3400, -200)
pop <- c(200, 400, 450, 500, 300, 100, 400, 700, 830, 1200, 400,
350, 200, 700, 370, 800, 200, 100, 120)
grp <- c(2,5,8,3,2,2,4,7,9,4,4,2,2,7,5,12,5,4,4)
med <- c(1.2, 1.3, 1.2, 0.9, 2.1, 1.4, 2.9, 3.4, 2.1,
1.1, 1.2, 1.5, 1.2, 0.9, 0.5, 3.3, 2.2, 1.1, 1.2)
par(mar=c(5,12,4,4)+0.1)
plot(time, pop, axes=F, ylim=c(0, max(pop)), xlab="", ylab="",
type="l", col="black",main="", xlim=c(7000, 3400))
points(time, pop, pch=20, col="black")
axis(2, ylim=c(0, max(pop)), col="black", lwd=2)
mtext(2, text="Population", line=2)
par(new=T)
plot(time, med, axes=F, ylim=c(0, max(med)), xlab="", ylab="",
type="l", lty=2, main="", xlim=c(7000, 3400), lwd=2)
axis(2, ylim=c(0, max(med)), lwd=2, line=3.5)
points(time, med, pch=20)
mtext(2, text="Median Group Size", line=5.5)
par(new=T)
plot(time, grp, axes=F, ylim=c(0, max(grp)), xlab="", ylab="",
type="l", lty=3, main="", xlim=c(7000, 3400), lwd=2)
axis(2, ylim=c(0, max(grp)), lwd=2, line=7)
points(time, grp, pch=20)
mtext(2, text="Number of Groups", line=9)
axis(1, pretty(range(time), 10))
mtext("cal BP", side=1, col="black", line=2)
legend(x=7000, y=12, legend=c("Population", "Median Group Size", "Number of Groups"), lty=c(1,2,3))
| /visualization_basics.R | no_license | hwang-dasom/adp_r_practices | R | false | false | 6,004 | r | # Visualization
#ggplot2
install.packages("ggplot2")
library(ggplot2)
data(ChickWeight)
head(ChickWeight)
ggplot(ChickWeight, aes(x=Time, y=weight, colour=Diet, group=Chick)) + geom_line()
# geom_point() : scatter plot, geom_smooth(): 배경색상 투명도(alpah), 평균 값 선 굵기(size) 조정
ggplot(ChickWeight, aes(x=Time, y=weight, colour=Diet)) + geom_point(alpha=.3) + geom_smooth(alpha=.2, size=1)
# point graph
h <- ggplot(ChickWeight, aes(x=Time, y=weight, colour=Diet))
h+geom_point(alpha=0.3)
# smooth graph
h <- ggplot(ChickWeight, aes(x=Time, y=weight, colour=Diet))
h + geom_smooth(alpha=0.4, size=3)
# Histogram
# Time=21일 때, 무게 분포도
ggplot(subset(ChickWeight, Time==21), aes(x=weight, colour=Diet)) + geom_density()
h <- ggplot(subset(ChickWeight, Time==21), aes(x=weight, fill=Diet))
# facet_grid(Diet~.) : 가로 / facet_grid(.~Diet) : 세로
h + geom_histogram(colour='black', binwidth=50) + facet_grid(Diet~.)
h + geom_histogram(colour='black', binwidth=50) + facet_grid(.~Diet)
# Point Graph
data(mtcars)
head(mtcars)
help(mtcars)
p <- qplot(wt, mpg, colour = hp, data=mtcars)
p + coord_cartesian(ylim = c(0,40)) # y 축 range
p + scale_colour_continuous(breaks = c(100,300)) # hp range
p + guides(colour = "colourbar")
m <- mtcars[1:10, ]
p%+%m # 10개 데이터 무게별 연비 그래프
# Bar graph
c <- ggplot(mtcars, aes(factor(cyl)))
c + geom_bar()
c + geom_bar(fill="red") # 내부색 바꾸기
c + geom_bar(colour = "red") # line color
c + geom_bar(fill="white", colour="red")
k <- ggplot(mtcars, aes(factor(cyl), fill=factor(vs))) # fill option
k + geom_bar()
# FIXME : !Data 없음!
#m <- ggplot(movies, aes(x=rating))
# m + geom_histogram()
# m + geom_histogram(aes(fill= ..count..))
# Line graph
data(economics)
head(economics)
b <- ggplot(economics, aes(x = date, y=unemploy))
b + geom_line()
b + geom_line(colour = "red")
b + geom_line(colour = 'red', size = 3)
b + geom_line(linetype=2)
b + geom_line(linetype=1)
b + geom_line(linetype=2)
b + geom_line(linetype=3)
b + geom_line(linetype=4)
# 효과주기
df <- data.frame(x =rnorm(5000), y = rnorm(5000))
h <- ggplot(df, aes(x,y))
h + geom_point()
h + geom_point(alpha = 0.5)
h + geom_point(alpha = 1/10)
p <- ggplot(mtcars, aes(wt, mpg))
p + geom_point(size=4)
p + geom_point(aes(colour=factor(cyl)), size=4)
p + geom_point(aes(shape=factor(cyl)), size=4)
# reshape2, plyr: data reshaping (melt) and column-wise helpers (colwise).
# NOTE(review): install.packages() in a script re-installs on every run;
# kept for parity with the original workflow.
install.packages("reshape2")
install.packages("plyr")  # fixed typo: was "pylr", which is not a CRAN package
library(reshape2)
library(plyr)
rescale01 <- function(x) (x-min(x)) / diff(range(x))
ec_scaled <- data.frame(date = economics$date, colwise(rescale01)(economics[, -(1:2)]))
ecm <- melt(ec_scaled, id = "date")
f <- ggplot(ecm, aes(date, value))
f + geom_line(aes(linetype = variable))
data(diamonds)
head(diamonds)
k <- ggplot(diamonds, aes(carat, ..density..)) + geom_histogram(binwidth=0.2)
k + facet_grid(.~cut)
w <- ggplot(diamonds, aes(clarity, fill=cut))
w + geom_bar()
# w + geom_bar(aes(order = desc(cut)))
df <- data.frame(x=1:10, y=1:10)
f <- ggplot(df, aes(x=x, y=y))
f + geom_line(linetype=2)
f+ geom_line(linetype = "dotdash")
p <- ggplot(mtcars, aes(wt, mpg))
p + geom_point(size=4)
p + geom_point(aes(size=qsec)) # qsec별 point 크기 다름
# Ponint + Line(임의)
p + geom_point(size=2.5) + geom_hline(yintercept = 25, size=3.5)
p + geom_point()
p + geom_point(shape=5)
p + geom_point(shape="k", size=3)
p + geom_point(shape=".")
p + geom_point(shape=NA)
# Various shapes
df2 <- data.frame(x = 1:5, y=1:25, z= 1:25)
s <- ggplot(df2, aes(x=x, y=y))
s+ geom_point(aes(shape=z), size=4) + scale_shape_identity()
# point range graph
dmod <- lm(price ~ cut, data=diamonds)
cuts <- data.frame(cut = unique(diamonds$cut),
predict(dmod, data.frame(cut = unique(diamonds$cut)),
se=TRUE)[c("fit", "se.fit")])
se <- ggplot(cuts, aes(x=cut, y = fit, ymin=fit-se.fit,
ymax = fit+se.fit, colour = cut))
se + geom_pointrange()
# 특정 영역 강조
p <- ggplot(mtcars, aes(wt, mpg)) + geom_point()
p + annotate("rect", xmin=2, xmax=3.5, ymin = 2, ymax=25,
fill="black", alpha=.6)
# qplot + smooth
p <- qplot(disp, wt, data=mtcars) + geom_smooth()
p
# limits 원하는 부분만
p + scale_x_continuous(limits = c(325, 500))
# 확대
d <- ggplot(diamonds, aes(carat, price))
d + stat_bin2d(bins=25, colour="grey50")
d + stat_bin2d(bins=25, colour="grey50")+scale_x_continuous(limits=c(0,2))
# boxplot
qplot(cut, price, data=diamonds, geom="boxplot") # 세로
last_plot() + coord_flip() # 가로
qplot(cut, data = diamonds, geom="bar")
h <- qplot(carat, data=diamonds, geom="histogram")
h + coord_flip()
h + coord_flip() + scale_x_reverse()
# Mutiple Axis
# multiple y axis in plot.R
time <- seq(7000, 3400, -200)
pop <- c(200, 400, 450, 500, 300, 100, 400, 700, 830, 1200, 400,
350, 200, 700, 370, 800, 200, 100, 120)
grp <- c(2,5,8,3,2,2,4,7,9,4,4,2,2,7,5,12,5,4,4)
med <- c(1.2, 1.3, 1.2, 0.9, 2.1, 1.4, 2.9, 3.4, 2.1,
1.1, 1.2, 1.5, 1.2, 0.9, 0.5, 3.3, 2.2, 1.1, 1.2)
par(mar=c(5,12,4,4)+0.1)
plot(time, pop, axes=F, ylim=c(0, max(pop)), xlab="", ylab="",
type="l", col="black",main="", xlim=c(7000, 3400))
points(time, pop, pch=20, col="black")
axis(2, ylim=c(0, max(pop)), col="black", lwd=2)
mtext(2, text="Population", line=2)
par(new=T)
plot(time, med, axes=F, ylim=c(0, max(med)), xlab="", ylab="",
type="l", lty=2, main="", xlim=c(7000, 3400), lwd=2)
axis(2, ylim=c(0, max(med)), lwd=2, line=3.5)
points(time, med, pch=20)
mtext(2, text="Median Group Size", line=5.5)
par(new=T)
plot(time, grp, axes=F, ylim=c(0, max(grp)), xlab="", ylab="",
type="l", lty=3, main="", xlim=c(7000, 3400), lwd=2)
axis(2, ylim=c(0, max(grp)), lwd=2, line=7)
points(time, grp, pch=20)
mtext(2, text="Number of Groups", line=9)
axis(1, pretty(range(time), 10))
mtext("cal BP", side=1, col="black", line=2)
legend(x=7000, y=12, legend=c("Population", "Median Group Size", "Number of Groups"), lty=c(1,2,3))
|
#' Returns the number of chunks in a disk.frame
#' @param df a disk.frame
#' @param skip.ready.check NOT implemented
#' @param ... not used
#' @export
#' @examples
#' cars.df = as.disk.frame(cars)
#'
#' # return the number of chunks
#' nchunks(cars.df)
#' nchunk(cars.df)
#'
#' # clean up cars.df
#' delete(cars.df)
nchunks <- function(df, ...) {
  # S3 generic: dispatches on class(df), e.g. to nchunks.disk.frame().
UseMethod("nchunks")
}
#' @rdname nchunks
#' @export
nchunk <- function(df, ...) {
  # S3 generic alias of nchunks(); dispatches to nchunk.<class>().
UseMethod("nchunk")
}
#' @rdname nchunks
#' @export
nchunk.disk.frame <- function(df, ...) {
  # Alias method: delegate to the canonical disk.frame implementation.
nchunks.disk.frame(df, ...)
}
#' @importFrom fs dir_ls
#' @rdname nchunks
#' @export
nchunks.disk.frame <- function(df, skip.ready.check = FALSE, ...) {
#if(!skip.ready.check) stopifnot(is_ready(df))
  # The chunk files live under the disk.frame's backing path attribute.
fpath <- attr(df,"path", exact=TRUE)
if(is.dir.disk.frame(df)) {
  # Directory-backed disk.frame: one chunk per regular file in the directory.
return(length(fs::dir_ls(fpath, type="file")))
} else {
  # Single-file disk.frame: exactly one chunk.
return(1)
}
}
#' @param df a disk.frame
#' @param skip.ready.check NOT implemented
#' @param ... not used
#' @export
#' @examples
#' cars.df = as.disk.frame(cars)
#'
#' # return the number of chunks
#' nchunks(cars.df)
#' nchunk(cars.df)
#'
#' # clean up cars.df
#' delete(cars.df)
nchunks <- function(df, ...) {
UseMethod("nchunks")
}
#' @rdname nchunks
#' @export
nchunk <- function(df, ...) {
UseMethod("nchunk")
}
#' @rdname nchunks
#' @export
nchunk.disk.frame <- function(df, ...) {
nchunks.disk.frame(df, ...)
}
#' @importFrom fs dir_ls
#' @rdname nchunks
#' @export
nchunks.disk.frame <- function(df, skip.ready.check = FALSE, ...) {
#if(!skip.ready.check) stopifnot(is_ready(df))
fpath <- attr(df,"path", exact=TRUE)
if(is.dir.disk.frame(df)) {
return(length(fs::dir_ls(fpath, type="file")))
} else {
return(1)
}
} |
rm(list=ls())
library(shiny)
setwd("C:/Users/bclamber/Desktop/distribution-viewer")
runApp("App-1", launch.browser = T)
runApp(list(
ui = bootstrapPage(
sliderInput("mu", "Mean", min=-30, max=30, value=0, step=0.2),
uiOutput('chunk')
),
server = function(input, output) {
output$chunk <- renderUI({
prismCodeBlock(
code=paste0("dnorm(0, ", input$mu,", 2)"),
language="r") })
}
))
# Prism core assets (highlighter script + default theme CSS) to inject into
# the page <head>; pair with prismLanguageDependencies() for per-language
# component scripts.
prismDependencies <- tags$head(
tags$script(src = "https://cdnjs.cloudflare.com/ajax/libs/prism/1.8.4/prism.min.js"),
tags$link(rel = "stylesheet", type = "text/css",
href = "https://cdnjs.cloudflare.com/ajax/libs/prism/1.8.4/themes/prism.min.css")
)
# Build one <head><script> dependency tag per requested Prism language
# component (e.g. "sql", "r", "python"). Returns a list of shiny tag
# objects, one element per entry in `languages`.
prismLanguageDependencies <- function(languages) {
  cdn_base <-
    "https://cdnjs.cloudflare.com/ajax/libs/prism/1.8.4/components/prism-"
  make_dependency <- function(lang) {
    script_url <- paste0(cdn_base, lang, ".min.js")
    tags$head(tags$script(src = script_url))
  }
  lapply(languages, make_dependency)
}
## format code with tags and language
# Wrap a code string in <pre><code> markup carrying the Prism language
# class so the client-side highlighter can style it.
# Returns a single character string; `language` defaults to "r".
prismAddTags <- function(code, language = "r") {
  opening_tag <- sprintf("<pre><code class = 'language-%s'>", language)
  paste0(opening_tag, code, "</code></pre>")
}
# Render `code` as a highlighted block: wrap it in Prism-tagged HTML and
# re-run the highlighter, which is needed for content inserted dynamically
# via renderUI/uiOutput.
prismCodeBlock <- function(code, language = "r") {
tagList(
HTML(prismAddTags(code, language = language)),
  # Re-highlight the whole page after this chunk is inserted into the DOM.
tags$script("Prism.highlightAll()")
)
}
## run app
library(shiny)
runApp(list(
ui = bootstrapPage(
prismDependencies,
prismLanguageDependencies(c("sql", "r", "python")),
sliderInput("mu", "Mean", min=-30, max=30, value=0, step=0.2),
uiOutput('r_chunk'),
uiOutput('python_chunk'),
uiOutput('sql_chunk')
),
server = function(input, output) {
output$r_chunk <- renderUI({
prismCodeBlock(
code = paste0("# this is R code\ndnorm(0, ", input$mu,", 2)"),
language = "r"
)
})
output$python_chunk <- renderUI({
prismCodeBlock(
code = '# this is python code
# Say hello, world.
print ("Hello, world!")',
language = "python"
)
})
output$sql_chunk <- renderUI({
prismCodeBlock(
code = "-- this is SQL code
SELECT * FROM mytable WHERE 1=2",
language = "sql"
)
})
}
))
n <- 10000
aMu <- rnorm(n)
aVar <- rgamma(n,shape = 1)
lResult <- rnorm(n,aMu,aVar)
hist(rexp(1000,aVar),100)
# Map a distribution name to its base-R density function.
# Unrecognised names fall back to the standard normal density (dnorm).
fSelectDistribution <- function(aDist) {
  switch(aDist,
    normal      = dnorm,
    gamma       = dgamma,
    uniform     = dunif,
    t           = dt,
    exponential = dexp,
    beta        = dbeta,
    dnorm  # default for any name not listed above
  )
}
aDist <- fSelectDistribution("t")
plot(seq(0,10,0.1),unlist(lapply(seq(0,10,0.1), function(x) aDist(x,df=1))))
lPMF <- dpois(lScale,1)
dataF <- data.frame(a=lScale,pmf=lPMF)
ggplot(data=dataF, aes(x=factor(a), y=pmf)) +
geom_bar(stat="identity", position=position_dodge()) + xlab('X')
library(mvtnorm)
x.points <- seq(-3,3,length.out=100)
y.points <- x.points
z <- matrix(0,nrow=100,ncol=100)
mu <- c(1,1)
sigma <- matrix(c(1,1,1,1),nrow=2)
for (i in 1:100) {
for (j in 1:100) {
z[i,j] <- mvtnorm::dmvt(c(x.points[i],y.points[j]),
delta=c(0,0),sigma=matrix(c(1,0,0,1),nrow=2),df=10,log=FALSE)
}
}
dataF <- melt(z)
dataF$X1<-rep(x.points,100)
dataF$X2 <- unlist(lapply(x.points,function(x) rep(x,100)))
names(dataF) <- c("x", "y", "z")
v <- ggplot(dataF, aes(x, y, z = z))
v + stat_contour(geom="polygon", aes(fill=..level..))+ stat_contour()
library(reshape)
pmvnorm(lower=c(-Inf,-Inf),upper=c(0,0),mean=c(0,0),sigma=matrix(c(1,0,0,1),nrow=2))[1]
install.packages('plotly')
runApp("Plotly-test",display.mode = "showcase")
library(plotly)
set.seed(100)
d <- diamonds[sample(nrow(diamonds), 1000), ]
p <- plot_ly(midwest, x = percollege, color = state, type = "box")
p
Sys.setenv("plotly_username"="ben18785")
Sys.setenv("plotly_api_key"="gjue7w0w41")
n <- 10000
lWishart<-rWishart(n,8,diag(6))
lSigma1 <- sqrt(lWishart[1,1,])
lSigma2 <- sqrt(lWishart[2,2,])
lRho12 <- lWishart[1,2,]/(lSigma1 * lSigma2)
hist(lRho12)
hist(lWishart[1,1,])
n <- 10000
lList<-rWishart(n,5,diag(4))
lOffD <- vector(length=n)
lOffD1 <- vector(length=n)
lDiag <- vector(length=n)
for (i in 1:n){
aMatrix <- solve(lList[,,i])
lOffD[i] <- aMatrix[1,2]/sqrt(aMatrix[1,1]*aMatrix[2,2])
lOffD1[i] <- aMatrix[2,3]/sqrt(aMatrix[2,2]*aMatrix[3,3])
lDiag[i] <- aMatrix[1,1]
}
hist(lOffD)
hist(lOffD1)
hist(log(lDiag))
library(MASS)
a<-kde2d(lOffD,lOffD1,n=50)
image(a)
h1 <- qplot(lOffD)
h2 <- hist(lOffD1, breaks=25, plot=F)
top <- max(h1$counts, h2$counts)
k <- kde2d(lOffD, lOffD1, n=50)
k <- m
# margins
oldpar <- par()
par(mar=c(3,3,1,1))
layout(matrix(c(2,0,1,3),2,2,byrow=T),c(3,1), c(1,3))
k #plot the image
par(mar=c(0,2,1,0))
barplot(h1$counts, axes=F, ylim=c(0, top), space=0, col='red')
par(mar=c(2,0,0.5,1))
barplot(h2$counts, axes=F, xlim=c(0, top), space=0, col='red', horiz=T)
library(ggplot2)
library(ggExtra)
library(grid)
library(gridExtra)
aDataF <- data.frame(x=lOffD,y=lOffD1)
m <- ggplot(aDataF, aes(x = x, y = y)) +
geom_point() + xlab(expression(rho[12])) + ylab(expression(rho[23]))
p <- m + geom_density2d(size=1)
p1<-ggExtra::ggMarginal(p,type = "histogram",fill=I("blue"))
grid.arrange(h1,p1,ncol = 2, top = "Main title")
p1 = qplot(1:10, rnorm(10))
p2 = qplot(1:10, rnorm(10))
hist(lList[1,2,]/sqrt(lList[1,1,]*lList[2,2,]))
lIWishart <- lapply(seq(1,10000,1),function(x) rinvwishart(8,diag(4)))
lVec <- vector(length = 10000)
for (i in 1:10000){
lVec[i]<-lIWishart[[i]][1,1]
}
hist(lVec)
install.packages('VarianceGamma')
library(VarianceGamma)
install.packages('grDevices')
### Code for plotting inverse Wishart
lList<-rWishart(input$sampleSizeInvWish,1,diag(4))
lOffD <- vector(length=input$sampleSizeInvWish)
lOffD1 <- vector(length=input$sampleSizeInvWish)
lDiag <- vector(length=input$sampleSizeInvWish)
for (i in 1:input$sampleSizeInvWish){
aMatrix <- lList[,,i]
lOffD[i] <- aMatrix[1,2]/sqrt(aMatrix[1,1]*aMatrix[2,2])
lOffD1[i] <- aMatrix[2,3]/sqrt(aMatrix[2,2]*aMatrix[3,3])
lDiag[i] <- aMatrix[1,1]
}
h1 <- qplot(log(lDiag),fill=I("blue")) + xlab(expression(paste("log(",sigma[1]^2,")")))
aDataF <- data.frame(x=lOffD,y=lOffD1)
m <- ggplot(aDataF, aes(x = x, y = y)) +
geom_point() + xlab(expression(rho[12])) + ylab(expression(rho[23]))
p <- m + geom_density2d(size=1)
p1<-ggExtra::ggMarginal(p,type = "histogram",fill=I("blue"))
grid.arrange(h1,p1,ncol = 2)
a1<- a2<- a3<- 1
a2<- .5
a3<- .5
x1<- x2<- seq(0.01, .99, by=.01)
# Dirichlet(a1, a2, a3) density over the 2-simplex, parameterised by the
# two free coordinates (x1, x2); reads a1, a2, a3 from the enclosing
# environment (set just above this definition).
f<- function(x1, x2){
# Normalising constant: Gamma(a1+a2+a3) / (Gamma(a1) Gamma(a2) Gamma(a3)).
term1<- gamma(a1+a2+a3)/(gamma(a1)*gamma(a2)*gamma(a3))
# Density kernel: x1^(a1-1) * x2^(a2-1) * (1-x1-x2)^(a3-1).
term2<- x1^(a1-1)*x2^(a2-1)*(1-x1-x2)^(a3-1)
# Simplex-interior indicator (x1 + x2 < 1); zeroes points outside support.
term3<- (x1 + x2< 1)
term1*term2*term3
}
z<- outer(x1, x2, f)
z[z<=0]<- NA
persp(x1, x2, z,main = "Dirichlet Distribution",
col = "lightblue",
theta = 100,
phi = 20,
r = 50,
d = 0.1,
expand = 0.5,
ltheta = 90,
lphi = 180,
shade = 0.75,
ticktype = "detailed",
nticks = 5,
zlim = if(length(na.omit(unique(as.vector(z))))< 2){ c(0,2.1) }else { NULL})
library(DirichletReg)
plot(DR_data(rdirichlet(1000, c(1,1))))
plot(DR_data(rdirichlet(1000, c(4,1,1))), a2d = list(colored = FALSE))
output$myWebGL <- renderWebGL({
points3d(1:10, 1:10, 1:10)
axes3d()
})
install.packages('shinyRGL')
library(shinyRGL)
library(scatterplot3d)
X <- t(as.matrix(expand.grid(0:6, 0:6)))
X <- X[ , colSums(X) <= 6]; X <- rbind(X, 6 - colSums(X))
Z <- round(apply(X, 2, function(x) dmultinom(x, prob = 1:3)), 3)
A <- data.frame(x = X[1, ], y = X[2, ], probability = Z)
scatterplot3d(A, type = "h", lwd = 3, box = FALSE,angle=300)
lUpperSigma <- rgamma(input$sampleSize,1,1)
lData <- rnorm(1000)
lData <- data.frame(data=lData)
ggplot(lData, aes(data)) +
geom_histogram(fill=I("blue"))
install.packages('d3Network')
| /s_Distributions_test.R | no_license | ben18785/distribution-zoo | R | false | false | 8,000 | r | rm(list=ls())
library(shiny)
setwd("C:/Users/bclamber/Desktop/distribution-viewer")
runApp("App-1", launch.browser = T)
runApp(list(
ui = bootstrapPage(
sliderInput("mu", "Mean", min=-30, max=30, value=0, step=0.2),
uiOutput('chunk')
),
server = function(input, output) {
output$chunk <- renderUI({
prismCodeBlock(
code=paste0("dnorm(0, ", input$mu,", 2)"),
language="r") })
}
))
prismDependencies <- tags$head(
tags$script(src = "https://cdnjs.cloudflare.com/ajax/libs/prism/1.8.4/prism.min.js"),
tags$link(rel = "stylesheet", type = "text/css",
href = "https://cdnjs.cloudflare.com/ajax/libs/prism/1.8.4/themes/prism.min.css")
)
prismLanguageDependencies <- function(languages) {
lapply(languages, function(x) {
tags$head(
tags$script(
src = paste0("https://cdnjs.cloudflare.com/ajax/libs/prism/1.8.4/components/prism-",
x, ".min.js")
)
)
})
}
## format code with tags and language
prismAddTags <- function(code, language = "r") {
paste0("<pre><code class = 'language-", language, "'>",
code,
"</code></pre>")
}
prismCodeBlock <- function(code, language = "r") {
tagList(
HTML(prismAddTags(code, language = language)),
tags$script("Prism.highlightAll()")
)
}
## run app
library(shiny)
runApp(list(
ui = bootstrapPage(
prismDependencies,
prismLanguageDependencies(c("sql", "r", "python")),
sliderInput("mu", "Mean", min=-30, max=30, value=0, step=0.2),
uiOutput('r_chunk'),
uiOutput('python_chunk'),
uiOutput('sql_chunk')
),
server = function(input, output) {
output$r_chunk <- renderUI({
prismCodeBlock(
code = paste0("# this is R code\ndnorm(0, ", input$mu,", 2)"),
language = "r"
)
})
output$python_chunk <- renderUI({
prismCodeBlock(
code = '# this is python code
# Say hello, world.
print ("Hello, world!")',
language = "python"
)
})
output$sql_chunk <- renderUI({
prismCodeBlock(
code = "-- this is SQL code
SELECT * FROM mytable WHERE 1=2",
language = "sql"
)
})
}
))
n <- 10000
aMu <- rnorm(n)
aVar <- rgamma(n,shape = 1)
lResult <- rnorm(n,aMu,aVar)
hist(rexp(1000,aVar),100)
fSelectDistribution <- function(aDist){
lResult <- switch(aDist,
normal=dnorm,
gamma=dgamma,
uniform=dunif,
t = dt,
exponential=dexp,
beta=dbeta,
dnorm)
return(lResult)
}
aDist <- fSelectDistribution("t")
plot(seq(0,10,0.1),unlist(lapply(seq(0,10,0.1), function(x) aDist(x,df=1))))
lPMF <- dpois(lScale,1)
dataF <- data.frame(a=lScale,pmf=lPMF)
ggplot(data=dataF, aes(x=factor(a), y=pmf)) +
geom_bar(stat="identity", position=position_dodge()) + xlab('X')
library(mvtnorm)
x.points <- seq(-3,3,length.out=100)
y.points <- x.points
z <- matrix(0,nrow=100,ncol=100)
mu <- c(1,1)
sigma <- matrix(c(1,1,1,1),nrow=2)
for (i in 1:100) {
for (j in 1:100) {
z[i,j] <- mvtnorm::dmvt(c(x.points[i],y.points[j]),
delta=c(0,0),sigma=matrix(c(1,0,0,1),nrow=2),df=10,log=FALSE)
}
}
dataF <- melt(z)
dataF$X1<-rep(x.points,100)
dataF$X2 <- unlist(lapply(x.points,function(x) rep(x,100)))
names(dataF) <- c("x", "y", "z")
v <- ggplot(dataF, aes(x, y, z = z))
v + stat_contour(geom="polygon", aes(fill=..level..))+ stat_contour()
library(reshape)
pmvnorm(lower=c(-Inf,-Inf),upper=c(0,0),mean=c(0,0),sigma=matrix(c(1,0,0,1),nrow=2))[1]
install.packages('plotly')
runApp("Plotly-test",display.mode = "showcase")
library(plotly)
set.seed(100)
d <- diamonds[sample(nrow(diamonds), 1000), ]
p <- plot_ly(midwest, x = percollege, color = state, type = "box")
p
Sys.setenv("plotly_username"="ben18785")
Sys.setenv("plotly_api_key"="gjue7w0w41")
n <- 10000
lWishart<-rWishart(n,8,diag(6))
lSigma1 <- sqrt(lWishart[1,1,])
lSigma2 <- sqrt(lWishart[2,2,])
lRho12 <- lWishart[1,2,]/(lSigma1 * lSigma2)
hist(lRho12)
hist(lWishart[1,1,])
n <- 10000
lList<-rWishart(n,5,diag(4))
lOffD <- vector(length=n)
lOffD1 <- vector(length=n)
lDiag <- vector(length=n)
for (i in 1:n){
aMatrix <- solve(lList[,,i])
lOffD[i] <- aMatrix[1,2]/sqrt(aMatrix[1,1]*aMatrix[2,2])
lOffD1[i] <- aMatrix[2,3]/sqrt(aMatrix[2,2]*aMatrix[3,3])
lDiag[i] <- aMatrix[1,1]
}
hist(lOffD)
hist(lOffD1)
hist(log(lDiag))
library(MASS)
a<-kde2d(lOffD,lOffD1,n=50)
image(a)
h1 <- qplot(lOffD)
h2 <- hist(lOffD1, breaks=25, plot=F)
top <- max(h1$counts, h2$counts)
k <- kde2d(lOffD, lOffD1, n=50)
k <- m
# margins
oldpar <- par()
par(mar=c(3,3,1,1))
layout(matrix(c(2,0,1,3),2,2,byrow=T),c(3,1), c(1,3))
k #plot the image
par(mar=c(0,2,1,0))
barplot(h1$counts, axes=F, ylim=c(0, top), space=0, col='red')
par(mar=c(2,0,0.5,1))
barplot(h2$counts, axes=F, xlim=c(0, top), space=0, col='red', horiz=T)
library(ggplot2)
library(ggExtra)
library(grid)
library(gridExtra)
aDataF <- data.frame(x=lOffD,y=lOffD1)
m <- ggplot(aDataF, aes(x = x, y = y)) +
geom_point() + xlab(expression(rho[12])) + ylab(expression(rho[23]))
p <- m + geom_density2d(size=1)
p1<-ggExtra::ggMarginal(p,type = "histogram",fill=I("blue"))
grid.arrange(h1,p1,ncol = 2, top = "Main title")
p1 = qplot(1:10, rnorm(10))
p2 = qplot(1:10, rnorm(10))
hist(lList[1,2,]/sqrt(lList[1,1,]*lList[2,2,]))
lIWishart <- lapply(seq(1,10000,1),function(x) rinvwishart(8,diag(4)))
lVec <- vector(length = 10000)
for (i in 1:10000){
lVec[i]<-lIWishart[[i]][1,1]
}
hist(lVec)
install.packages('VarianceGamma')
library(VarianceGamma)
install.packages('grDevices')
### Code for plotting inverse Wishart
lList<-rWishart(input$sampleSizeInvWish,1,diag(4))
lOffD <- vector(length=input$sampleSizeInvWish)
lOffD1 <- vector(length=input$sampleSizeInvWish)
lDiag <- vector(length=input$sampleSizeInvWish)
for (i in 1:input$sampleSizeInvWish){
aMatrix <- lList[,,i]
lOffD[i] <- aMatrix[1,2]/sqrt(aMatrix[1,1]*aMatrix[2,2])
lOffD1[i] <- aMatrix[2,3]/sqrt(aMatrix[2,2]*aMatrix[3,3])
lDiag[i] <- aMatrix[1,1]
}
h1 <- qplot(log(lDiag),fill=I("blue")) + xlab(expression(paste("log(",sigma[1]^2,")")))
aDataF <- data.frame(x=lOffD,y=lOffD1)
m <- ggplot(aDataF, aes(x = x, y = y)) +
geom_point() + xlab(expression(rho[12])) + ylab(expression(rho[23]))
p <- m + geom_density2d(size=1)
p1<-ggExtra::ggMarginal(p,type = "histogram",fill=I("blue"))
grid.arrange(h1,p1,ncol = 2)
a1<- a2<- a3<- 1
a2<- .5
a3<- .5
x1<- x2<- seq(0.01, .99, by=.01)
f<- function(x1, x2){
term1<- gamma(a1+a2+a3)/(gamma(a1)*gamma(a2)*gamma(a3))
term2<- x1^(a1-1)*x2^(a2-1)*(1-x1-x2)^(a3-1)
term3<- (x1 + x2< 1)
term1*term2*term3
}
z<- outer(x1, x2, f)
z[z<=0]<- NA
persp(x1, x2, z,main = "Dirichlet Distribution",
col = "lightblue",
theta = 100,
phi = 20,
r = 50,
d = 0.1,
expand = 0.5,
ltheta = 90,
lphi = 180,
shade = 0.75,
ticktype = "detailed",
nticks = 5,
zlim = if(length(na.omit(unique(as.vector(z))))< 2){ c(0,2.1) }else { NULL})
library(DirichletReg)
plot(DR_data(rdirichlet(1000, c(1,1))))
plot(DR_data(rdirichlet(1000, c(4,1,1))), a2d = list(colored = FALSE))
output$myWebGL <- renderWebGL({
points3d(1:10, 1:10, 1:10)
axes3d()
})
install.packages('shinyRGL')
library(shinyRGL)
library(scatterplot3d)
X <- t(as.matrix(expand.grid(0:6, 0:6)))
X <- X[ , colSums(X) <= 6]; X <- rbind(X, 6 - colSums(X))
Z <- round(apply(X, 2, function(x) dmultinom(x, prob = 1:3)), 3)
A <- data.frame(x = X[1, ], y = X[2, ], probability = Z)
scatterplot3d(A, type = "h", lwd = 3, box = FALSE,angle=300)
lUpperSigma <- rgamma(input$sampleSize,1,1)
lData <- rnorm(1000)
lData <- data.frame(data=lData)
ggplot(lData, aes(data)) +
geom_histogram(fill=I("blue"))
install.packages('d3Network')
|
context("Setting and getting node attributes")
# Covers set_node_attrs()/set_node_attrs_ws() on both graph objects and bare
# node data frames: single-node, whole-graph, and selection-based setting,
# plus the error cases (protected `id` attribute, multi-value input).
test_that("setting node attributes is possible", {
# Create a graph
graph <-
create_graph() %>%
add_path(8)
# Set attribute for node `1`
graph_set_a <-
set_node_attrs(
graph,
nodes = 1,
node_attr = "value",
values = 5)
# Expect that node `1` has node attr set for `value`
expect_equal(
graph_set_a$nodes_df[
which(graph_set_a$nodes_df$id == 1), 4], 5)
# Expect that node `1` has node attr set for `value`
expect_equal(
get_cache(
cache_node_attrs(
graph_set_a,
node_attr = "value",
nodes = 1)), 5)
# Set attribute for node `1` with a different value
graph_set_a <-
set_node_attrs(
graph,
nodes = 1,
node_attr = "value",
values = 8)
# Expect that node `1` has node attr set for `value`
expect_equal(
get_cache(
cache_node_attrs(
graph_set_a,
node_attr = "value",
nodes = 1)), 8)
# Select node `1`
graph_select_a <- select_nodes(graph, nodes = 1)
# Set attribute for selected node `1`
graph_select_a <-
set_node_attrs_ws(
graph_select_a,
node_attr = "value",
value = 5)
# Expect that node `1` has node attr set for `value`
expect_equal(
graph_select_a$nodes_df[
which(graph_select_a$nodes_df$id == 1), 4], 5)
# Set attribute for all nodes
graph_set_all <-
set_node_attrs(
graph,
node_attr = "value",
values = 5)
# Expect that all nodes have the attribute set
expect_true(
all(graph_set_all$nodes_df$value == 5))
# Select node `1` and apply a node attribute
# using that node selection
graph_node_selection <-
graph %>%
select_nodes(nodes = 1) %>%
set_node_attrs_ws(node_attr = "value", value = 5)
# Expect that node `1` has node attr set for `value`
expect_equal(
graph_node_selection$nodes_df[
which(graph_node_selection$nodes_df[, 1] == 1), 4], 5)
# Expect that getting the node attribute from a
# selection works in the same way
expect_equal(
get_cache(
cache_node_attrs_ws(
graph_node_selection, node_attr = "value")), 5)
# Get the node data frame from the graph as a separate object
graph_node_df <- graph$nodes_df
# Set attribute for named node `1` in the ndf
graph_node_df_set_a <-
set_node_attrs(
graph_node_df,
nodes = 1,
node_attr = "value",
values = 5)
# Expect that node `1` has node attr set for `value`
expect_equal(
graph_node_df_set_a[
which(graph_node_df_set_a[, 1] == 1), 4], 5)
# Set attribute for named node `1` with a different value
graph_node_df_set_a_node_attr_df <-
set_node_attrs(
graph_node_df_set_a,
nodes = 1,
node_attr = "value",
values = 8)
# Expect that node `1` in the ndf has node attr set for `value`
expect_equal(
graph_node_df_set_a_node_attr_df[
which(graph_node_df_set_a_node_attr_df[, 1] == 1), 4], 8)
# Set attribute for all nodes in the ndf
graph_node_df_set_all <-
set_node_attrs(
graph_node_df,
node_attr = "value",
values = 5)
# Expect that all nodes in the ndf will have the attribute set
expect_true(all(graph_node_df_set_all$value == 5))
# Expect that getting the node attribute from a graph without
# a selection will result in an error
expect_error(cache_node_attrs_ws(graph))
# Expect an error if the attribute selected is `id`
expect_error(
set_node_attrs(
graph,
nodes = 1,
node_attr = "id",
values = "B"))
# Expect an error if the length of `value` is greater than 1
expect_error(
set_node_attrs(
graph,
nodes = 1,
node_attr = "value",
values = c(1, 2)))
})
# Covers set_edge_attrs()/set_edge_attrs_ws(): setting `value` on a single
# edge (by from/to), on all edges, and via an active edge selection, with
# retrieval checked both directly on edges_df and through the cache.
test_that("setting edge attributes is possible", {
# Create a graph
graph <-
create_graph() %>%
add_path(8)
# Set edge attribute for edge `1` -> `2`
graph_set_a_1 <-
set_edge_attrs(
graph,
from = 1,
to = 2,
edge_attr = "value",
values = 5)
# Expect that edge `1` -> `2` has edge attr set for `value`
expect_equal(
graph_set_a_1$edges_df[
which(graph_set_a_1$edges_df$from == 1 &
graph_set_a_1$edges_df$to == 2), 5], 5)
# Get edge attribute for edge `1` -> `2`
graph_set_a_1_edge_attr <-
get_cache(
cache_edge_attrs(
graph_set_a_1,
edge_attr = "value",
from = 1,
to = 2))
# Expect that edge `1` -> `2` has edge attr set for `value`
expect_equal(graph_set_a_1_edge_attr, 5)
# Set attribute for named edge `1` -> `2` with a different value
graph_set_a_1 <-
set_edge_attrs(
graph_set_a_1,
from = 1,
to = 2,
edge_attr = "value",
values = 8)
# Expect that edge `1` -> `2` has edge attr set for `value`
expect_equal(
get_cache(
cache_edge_attrs(
graph_set_a_1,
edge_attr = "value",
from = 1,
to = 2)), 8)
# Select edge `1` -> `2`
graph_select_a_1 <- select_edges(graph, from = 1, to = 2)
# Set attribute for selected edge `1` -> `2`
graph_select_a_1 <-
set_edge_attrs_ws(
graph_select_a_1,
edge_attr = "value",
value = 5)
# Expect that edge `1` -> `2` has edge attr set for `value`
expect_equal(
get_cache(
cache_edge_attrs(
graph_select_a_1,
edge_attr = "value",
from = 1,
to = 2)), 5)
# Set attribute for all edges
graph_set_all <-
set_edge_attrs(
graph,
edge_attr = "value",
values = 5)
# Expect that all edges have the attribute set
expect_true(all(graph_set_all$edges_df$value == 5))
# Select edge `1` -> `2` and apply an edge attribute using that
# edge selection
graph_edge_selection <-
graph %>%
select_edges(from = 1, to = 2) %>%
set_edge_attrs_ws(
edge_attr = "value", value = 5)
# Expect that edge `1` -> `2` has edge attr set for `value`
expect_equal(
graph_edge_selection$edges_df[
which(graph_edge_selection$edges_df$from == 1 &
graph_edge_selection$edges_df$to == 2), 5], 5)
})
# Verifies get_node_attrs(): whole-graph retrieval returns a named numeric
# vector (names = node ids), and the `nodes` argument subsets it.
# The expected values are fixed by set_seed = 23 in create_random_graph().
test_that("Getting node attributes is possible", {
# Create a random graph with 4 nodes and 4 edges
random_graph <-
create_random_graph(
n = 4, m = 4,
set_seed = 23)
# Get node attributes for all nodes with the
# `value` attribute
all_nodes <- get_node_attrs(random_graph, "value")
# Expect a numeric vector
expect_is(all_nodes, "numeric")
# Expect the vector to have length 4
expect_equal(length(all_nodes), 4)
# Expect certain names to be in the vector
expect_true(all(1:4 %in% names(all_nodes)))
# Expect certain values to be in the vector
expect_equal(all_nodes[[1]], 6.0)
expect_equal(all_nodes[[2]], 2.5)
expect_equal(all_nodes[[3]], 3.5)
expect_equal(all_nodes[[4]], 7.5)
# Get node attributes for nodes `1` and `3`
nodes_1_3 <-
get_node_attrs(
random_graph,
"value",
nodes = c(1, 3))
# Expect the vector to have length 2
expect_equal(length(nodes_1_3), 2)
# Expect certain names to be in the vector
expect_true(all(c(1, 3) %in% names(nodes_1_3)))
# Expect certain values to be in the vector
expect_equal(nodes_1_3[[1]], 6.0)
expect_equal(nodes_1_3[[2]], 3.5)
})
# Verifies get_edge_attrs(): whole-graph retrieval returns a named numeric
# vector with "from -> to" names, and the from/to arguments subset it.
test_that("Getting edge attributes is possible", {
# Create a simple graph where edges have an edge
# attribute named `value`
graph <-
create_graph() %>%
add_n_nodes(4) %>%
{
edges <-
create_edge_df(
from = c(1, 2, 1, 4),
to = c(2, 3, 4, 3),
rel = "rel")
add_edge_df(., edges)
} %>%
set_edge_attrs(
"value", 1.6, 1, 2) %>%
set_edge_attrs(
"value", 4.3, 1, 4) %>%
set_edge_attrs(
"value", 2.9, 2, 3) %>%
set_edge_attrs(
"value", 8.4, 4, 3)
# Get edge attribute values for all edges carrying
# the `value` attribute
all_edges <- get_edge_attrs(graph, "value")
# Expect a numeric vector
expect_is(all_edges, "numeric")
# Expect the vector to have length 4
expect_equal(length(all_edges), 4)
# Expect certain names to be in the vector
expect_true(
all(c("1 -> 2", "2 -> 3", "1 -> 4", "4 -> 3") %in%
names(all_edges)))
# Expect certain values to be in the vector
expect_equal(all_edges[[1]], 1.6)
expect_equal(all_edges[[2]], 2.9)
expect_equal(all_edges[[3]], 4.3)
expect_equal(all_edges[[4]], 8.4)
# Get only edge attribute values for specified
# edges using the `from` and `to` arguments
some_edges <-
get_edge_attrs(
graph,
"value",
c(1, 2), c(2, 3))
# Expect the vector to have length 2
expect_equal(length(some_edges), 2)
# Expect certain names to be in the vector
expect_true(
all(c("1 -> 2", "2 -> 3") %in%
names(some_edges)))
# Expect certain values to be in the vector
expect_equal(some_edges[[1]], 1.6)
expect_equal(some_edges[[2]], 2.9)
})
| /tests/testthat/test-set_get_node_edge_attrs.R | no_license | yflyzhang/DiagrammeR | R | false | false | 8,975 | r | context("Setting and getting node attributes")
# Tests for set_node_attrs() / set_node_attrs_ws(): setting a node attribute
# on a graph (by node ID, by selection, or on all nodes), setting attributes
# directly on a node data frame (ndf), and the expected error conditions.
# NOTE(review): the tests index nodes_df with column position 4, assumed to
# be the newly added `value` column -- confirm against the ndf layout.
test_that("setting node attributes is possible", {
# Create a graph
graph <-
create_graph() %>%
add_path(8)
# Set attribute for node `1`
graph_set_a <-
set_node_attrs(
graph,
nodes = 1,
node_attr = "value",
values = 5)
# Expect that node `1` has node attr set for `value`
expect_equal(
graph_set_a$nodes_df[
which(graph_set_a$nodes_df$id == 1), 4], 5)
# Expect that node `1` has node attr set for `value`
# (same check, but through the cache_* retrieval path)
expect_equal(
get_cache(
cache_node_attrs(
graph_set_a,
node_attr = "value",
nodes = 1)), 5)
# Set attribute for node `1` with a different value
graph_set_a <-
set_node_attrs(
graph,
nodes = 1,
node_attr = "value",
values = 8)
# Expect that node `1` has node attr set for `value`
expect_equal(
get_cache(
cache_node_attrs(
graph_set_a,
node_attr = "value",
nodes = 1)), 8)
# Select node `1`
graph_select_a <- select_nodes(graph, nodes = 1)
# Set attribute for selected node `1`
graph_select_a <-
set_node_attrs_ws(
graph_select_a,
node_attr = "value",
value = 5)
# Expect that node `1` has node attr set for `value`
expect_equal(
graph_select_a$nodes_df[
which(graph_select_a$nodes_df$id == 1), 4], 5)
# Set attribute for all nodes
graph_set_all <-
set_node_attrs(
graph,
node_attr = "value",
values = 5)
# Expect that all nodes have the attribute set
expect_true(
all(graph_set_all$nodes_df$value == 5))
# Select node `1` and apply a node attribute
# using that node selection
graph_node_selection <-
graph %>%
select_nodes(nodes = 1) %>%
set_node_attrs_ws(node_attr = "value", value = 5)
# Expect that node `1` has node attr set for `value`
expect_equal(
graph_node_selection$nodes_df[
which(graph_node_selection$nodes_df[, 1] == 1), 4], 5)
# Expect that getting the node attribute from a
# selection works in the same way
expect_equal(
get_cache(
cache_node_attrs_ws(
graph_node_selection, node_attr = "value")), 5)
# Get the node data frame from the graph as a separate object
# (set_node_attrs() also accepts a bare ndf instead of a graph)
graph_node_df <- graph$nodes_df
# Set attribute for named node `1` in the ndf
graph_node_df_set_a <-
set_node_attrs(
graph_node_df,
nodes = 1,
node_attr = "value",
values = 5)
# Expect that node `1` has node attr set for `value`
expect_equal(
graph_node_df_set_a[
which(graph_node_df_set_a[, 1] == 1), 4], 5)
# Set attribute for named node `1` with a different value
graph_node_df_set_a_node_attr_df <-
set_node_attrs(
graph_node_df_set_a,
nodes = 1,
node_attr = "value",
values = 8)
# Expect that node `1` in the ndf has node attr set for `value`
expect_equal(
graph_node_df_set_a_node_attr_df[
which(graph_node_df_set_a_node_attr_df[, 1] == 1), 4], 8)
# Set attribute for all nodes in the ndf
graph_node_df_set_all <-
set_node_attrs(
graph_node_df,
node_attr = "value",
values = 5)
# Expect that all nodes in the ndf will have the attribute set
expect_true(all(graph_node_df_set_all$value == 5))
# Expect that getting the node attribute from a graph without
# a selection will result in an error
expect_error(cache_node_attrs_ws(graph))
# Expect an error if the attribute selected is `id`
# (the `id` column is reserved and must not be overwritten)
expect_error(
set_node_attrs(
graph,
nodes = 1,
node_attr = "id",
values = "B"))
# Expect an error if the length of `value` is greater than 1
# when only a single node is targeted
expect_error(
set_node_attrs(
graph,
nodes = 1,
node_attr = "value",
values = c(1, 2)))
})
# Tests for set_edge_attrs() / set_edge_attrs_ws(): setting an edge attribute
# by from/to pair, via an edge selection, and on all edges at once.
# NOTE(review): the tests index edges_df with column position 5, assumed to
# be the newly added `value` column -- confirm against the edf layout.
test_that("setting edge attributes is possible", {
# Create a graph
graph <-
create_graph() %>%
add_path(8)
# Set edge attribute for edge `1` -> `2`
graph_set_a_1 <-
set_edge_attrs(
graph,
from = 1,
to = 2,
edge_attr = "value",
values = 5)
# Expect that edge `1` -> `2` has edge attr set for `value`
expect_equal(
graph_set_a_1$edges_df[
which(graph_set_a_1$edges_df$from == 1 &
graph_set_a_1$edges_df$to == 2), 5], 5)
# Get edge attribute for edge `1` -> `2`
# (retrieval via the cache_* path)
graph_set_a_1_edge_attr <-
get_cache(
cache_edge_attrs(
graph_set_a_1,
edge_attr = "value",
from = 1,
to = 2))
# Expect that edge `1` -> `2` has edge attr set for `value`
expect_equal(graph_set_a_1_edge_attr, 5)
# Set attribute for named edge `1` -> `2` with a different value
# (overwriting the previously set value of 5)
graph_set_a_1 <-
set_edge_attrs(
graph_set_a_1,
from = 1,
to = 2,
edge_attr = "value",
values = 8)
# Expect that edge `1` -> `2` has edge attr set for `value`
expect_equal(
get_cache(
cache_edge_attrs(
graph_set_a_1,
edge_attr = "value",
from = 1,
to = 2)), 8)
# Select edge `1` -> `2`
graph_select_a_1 <- select_edges(graph, from = 1, to = 2)
# Set attribute for selected edge `1` -> `2`
graph_select_a_1 <-
set_edge_attrs_ws(
graph_select_a_1,
edge_attr = "value",
value = 5)
# Expect that edge `1` -> `2` has edge attr set for `value`
expect_equal(
get_cache(
cache_edge_attrs(
graph_select_a_1,
edge_attr = "value",
from = 1,
to = 2)), 5)
# Set attribute for all edges
graph_set_all <-
set_edge_attrs(
graph,
edge_attr = "value",
values = 5)
# Expect that all edges have the attribute set
expect_true(all(graph_set_all$edges_df$value == 5))
# Select edge `1` -> `2` and apply an edge attribute using that
# edge selection
graph_edge_selection <-
graph %>%
select_edges(from = 1, to = 2) %>%
set_edge_attrs_ws(
edge_attr = "value", value = 5)
# Expect that edge `1` -> `2` has edge attr set for `value`
expect_equal(
graph_edge_selection$edges_df[
which(graph_edge_selection$edges_df$from == 1 &
graph_edge_selection$edges_df$to == 2), 5], 5)
})
# Tests for get_node_attrs(): the returned vector should be numeric, named
# by node ID, and contain the `value` attribute for the requested nodes.
test_that("Getting node attributes is possible", {
# Create a random graph with 4 nodes and 4 edges
random_graph <-
create_random_graph(
n = 4, m = 4,
set_seed = 23)
# Get node attributes for all nodes with the
# `value` attribute
all_nodes <- get_node_attrs(random_graph, "value")
# Expect a numeric vector
expect_is(all_nodes, "numeric")
# Expect the vector to have length 4
expect_equal(length(all_nodes), 4)
# Expect certain names to be in the vector
expect_true(all(1:4 %in% names(all_nodes)))
# Expect certain values to be in the vector
# (values are deterministic because of `set_seed = 23` above)
expect_equal(all_nodes[[1]], 6.0)
expect_equal(all_nodes[[2]], 2.5)
expect_equal(all_nodes[[3]], 3.5)
expect_equal(all_nodes[[4]], 7.5)
# Get node attributes for nodes `1` and `3`
nodes_1_3 <-
get_node_attrs(
random_graph,
"value",
nodes = c(1, 3))
# Expect the vector to have length 2
expect_equal(length(nodes_1_3), 2)
# Expect certain names to be in the vector
expect_true(all(c(1, 3) %in% names(nodes_1_3)))
# Expect certain values to be in the vector
expect_equal(nodes_1_3[[1]], 6.0)
expect_equal(nodes_1_3[[2]], 3.5)
})
# Tests for get_edge_attrs(): the returned vector should be numeric and
# named by "from -> to" edge labels, both for all edges and for a subset
# selected via the `from`/`to` arguments.
test_that("Getting edge attributes is possible", {
# Create a simple graph where edges have an edge
# attribute named `value`
graph <-
create_graph() %>%
add_n_nodes(4) %>%
{
edges <-
create_edge_df(
from = c(1, 2, 1, 4),
to = c(2, 3, 4, 3),
rel = "rel")
add_edge_df(., edges)
} %>%
# Positional arguments below map to: edge_attr, values, from, to
# (cf. the named calls to set_edge_attrs() elsewhere in this file)
set_edge_attrs(
"value", 1.6, 1, 2) %>%
set_edge_attrs(
"value", 4.3, 1, 4) %>%
set_edge_attrs(
"value", 2.9, 2, 3) %>%
set_edge_attrs(
"value", 8.4, 4, 3)
# Get edge attributes for all edges with the
# `value` attribute
all_edges <- get_edge_attrs(graph, "value")
# Expect a numeric vector
expect_is(all_edges, "numeric")
# Expect the vector to have length 4
expect_equal(length(all_edges), 4)
# Expect certain names to be in the vector
expect_true(
all(c("1 -> 2", "2 -> 3", "1 -> 4", "4 -> 3") %in%
names(all_edges)))
# Expect certain values to be in the vector
expect_equal(all_edges[[1]], 1.6)
expect_equal(all_edges[[2]], 2.9)
expect_equal(all_edges[[3]], 4.3)
expect_equal(all_edges[[4]], 8.4)
# Get only edge attribute values for specified
# edges using the `from` and `to` arguments
some_edges <-
get_edge_attrs(
graph,
"value",
c(1, 2), c(2, 3))
# Expect the vector to have length 2
expect_equal(length(some_edges), 2)
# Expect certain names to be in the vector
expect_true(
all(c("1 -> 2", "2 -> 3") %in%
names(some_edges)))
# Expect certain values to be in the vector
expect_equal(some_edges[[1]], 1.6)
expect_equal(some_edges[[2]], 2.9)
})
|
# Exploratory script: count households with phone (s4bq8) and internet
# (s4bq14) access per LGA (Local Government Area) in the Nigeria General
# Household Survey, and write the per-LGA counts to an interim CSV.
#packages
library(tidyverse)
library(data.table)
#Set nga_data_layers git repo as working directory
#########################
#  Exploratory Process  #
#########################
# Start with phone and mobile data to explore NGA General Household Survey dataset
# develop data cleaning process
lga_labels <- fread('./data/interim/nga_lga_labels.csv') #get LGA labels
#Load data
data <- fread('./data/raw/sect4b_plantingw4.csv')
#layer_data <- na.omit(data, cols = 's4bq8')
# Number of phone-owning households (s4bq8 == 1) per LGA
have_phones <- data[s4bq8 == 1, .N, by = lga]
setkey(have_phones, lga)
setkey(lga_labels, value)
# data.table update join: attach phone counts to the label table by key
phone_and_labels <- lga_labels[have_phones, have_phones := i.N]
# LGAs without a match get a count of 0 rather than NA
setnafill(phone_and_labels, fill = 0, cols = 'have_phones')
# Number of households with internet access (s4bq14 == 1) per LGA
internet <- data[s4bq14 == 1, .N, by = lga]
setkey(internet, lga)
setkey(phone_and_labels, value)
phone_and_labels[internet, have_internet := i.N]
setnafill(phone_and_labels, fill = 0, cols = 'have_internet')
# row.names = FALSE avoids writing a spurious row-index column that a
# downstream fread() would pick up as an unnamed variable
write.csv(phone_and_labels, './data/interim/nga_phone_internet.csv',
          row.names = FALSE)
#################################
#  Develop Data Extraction Loop #
#################################
# Fixed: `pattern` takes a regex, not a glob, so use '\\.csv$' instead of
# '*.csv'; `full.names = TRUE` returns full paths so fread() below resolves
# the file regardless of the working directory.
nga_dta_list <- list.files(path = "./data/raw/", pattern = '\\.csv$',
                           full.names = TRUE)
# NOTE(review): selecting by position (89) is fragile -- it depends on the
# directory contents and sort order; prefer matching the file by name.
mobile_bank <- fread(nga_dta_list[89])
| /src/data_processing/nga_mobile_data.R | permissive | catalyst-offgrid/nga_data_layers | R | false | false | 1,203 | r | #packages
library(tidyverse)
library(data.table)
#Set nga_data_layers git repo as working directory
#########################
# Exploratory Process #
#########################
# Start with phone and mobile data to explore NGA General Household Survey dataset
# develop data cleaning process
lga_labels <- fread('./data/interim/nga_lga_labels.csv') #get LGA labels
#Load data
data <- fread('./data/raw/sect4b_plantingw4.csv')
#layer_data <- na.omit(data, cols = 's4bq8')
have_phones <- data[s4bq8 == 1, .N, by=lga]
setkey(have_phones, lga)
setkey(lga_labels, value)
phone_and_labels <- lga_labels[have_phones, have_phones := i.N] #lga_labels[have_phones]
setnafill(phone_and_labels, fill=0, cols='have_phones')
internet <- data[s4bq14 == 1, .N, by=lga]
setkey(internet, lga)
setkey(phone_and_labels, value)
phone_and_labels[internet, have_internet := i.N]
setnafill(phone_and_labels, fill=0, cols='have_internet')
write.csv(phone_and_labels, './data/interim/nga_phone_internet.csv')
#################################
# Develop Data Extraction Loop #
#################################
nga_dta_list <- list.files(path = "./data/raw/", pattern = '*.csv')
mobile_bank <- fread(nga_dta_list[89])
|
\name{plotmap}
\alias{levelplot}
\alias{levelplot,formula,hyperSpec-method}
\alias{levelplot,hyperSpec,missing-method}
\alias{map.identify}
\alias{plotmap}
\alias{plotvoronoi}
\title{Plot a Map and Identify/Select Spectra in the Map
\code{\link[lattice]{levelplot}} functions for hyperSpec objects. An image or map of a summary
value of each spectrum is plotted. Spectra may be identified by mouse click.}
\usage{
plotmap(object, model = spc ~ x * y, func = mean,
func.args = list(), ...)
\S4method{levelplot}{hyperSpec,missing}(x, data, ...)
\S4method{levelplot}{formula,hyperSpec}(x, data,
transform.factor = TRUE, ..., contour = FALSE,
useRaster = !contour)
map.identify(object, model = spc ~ x * y,
voronoi = FALSE, ..., tol = 0.02, warn = TRUE)
plotvoronoi(object, model = spc ~ x * y,
use.tripack = FALSE, mix = FALSE, ...)
}
\arguments{
\item{object,data}{the \code{hyperSpec} object}
\item{model,x}{formula specifying the columns of object
that are to be displayed by
\code{\link[lattice]{levelplot}}}
\item{func,func.args}{Before plotting, \code{plotmap}
applies function \code{func} with the arguments given in
the list \code{func.args} to each of the spectra. Thus a
single summary value is displayed for each of the
spectra.
This can be suppressed manually by setting \code{func} to
NULL. It is automatically suppressed if
\code{.wavelength} appears in the formula.}
\item{voronoi}{Should the plot for identifying spectra by
mouse click be produced by \code{plotmap} (default) or
\code{plotvoronoi}?}
\item{\dots}{further arguments are passed down the call
chain, and finally to \code{\link[lattice]{levelplot}}}
\item{transform.factor}{If the color-coded variable is a
factor, should \code{\link{trellis.factor.key}} be used
to compute the color coding and legend?}
\item{contour,useRaster}{see
\code{\link[lattice]{levelplot}}}
\item{tol}{tolerance for \code{map.identify} as fraction
of the viewport (i.e. in "npc" \link[grid]{unit}s)}
\item{warn}{should a warning be issued if no point is
within the specified tolerance? See also details.}
\item{use.tripack}{Whether package tripack should be used
for calculating the voronoi polygons. If \code{FALSE},
package deldir is used instead. See details.}
\item{mix}{For Voronoi plots using package tripack, I
experienced errors if the data was spatially ordered.
Randomly rearranging the rows of the hyperSpec object
circumvents this problem.}
}
\value{
\code{map.identify} returns a vector of row indices into
\code{object} of the clicked points.
The other functions return a lattice object.
}
\description{
The \code{model} can contain the special column name
\code{.wavelength} to specify the wavelength axis.
}
\details{
\code{plotmap}, \code{map.identify}, and the
\code{levelplot} methods internally use the same gateway
function to \code{\link[lattice]{levelplot}}. Thus
\code{transform.factor} can be used with all of them and
the panel function defaults to
\code{\link[lattice]{panel.levelplot.raster}} for all
three. Two special column names, \code{.rownames} and
\code{.wavelength} may be used.
\code{levelplot} plots the spectra matrix.
\code{plotvoronoi} calls \code{plotmap} with different
default settings, namely the panel function defaults to
\code{\link[latticeExtra]{panel.voronoi}}.
\code{\link[latticeExtra]{panel.voronoi}} depends on
either of the packages 'tripack' or 'deldir' being
installed. For further information, please consult the
help page of \code{\link[latticeExtra]{panel.voronoi}}.
On the \code{\link{chondro}} data set, \code{plotmap} is
roughly 5 times faster than \code{plotvoronoi} using
tripack, and ca. 15 times faster than \code{plotvoronoi}
using deldir. Package tripack, however, is free only for
non-commercial use. Also, it seems that tripack version
hang (R running at full CPU power, but not responding nor
finishing the calculation) for certain data sets. In this
case, \code{mix = TRUE} may help.
\code{map.identify} calls \code{plotmap} and
\code{plotvoronoi}, respectively and waits for (left)
mouse clicks on points. Other mouse clicks end the input.
Unlike \code{\link[lattice]{panel.identify}}, the indices
returned by \code{map.identify} are in the same order as
the points were clicked. Also, multiple clicks on the
same point are returned as multiple entries with the same
index.
\code{map.identify} uses option \code{debuglevel} similar
to \code{\link{spc.identify}}: \code{debuglevel == 1}
will plot the tolerance window if no data point was
inside (and additionally labels the point) while
\code{debuglevel == 2} will always plot the tolerance
window.
The \code{map.sel.*} functions offer further interactive
selection, see \code{\link{map.sel.poly}}.
}
\examples{
\dontrun{
vignette (plotting)
vignette (introduction)
}
levelplot (spc ~ y * x, chondro [,,1003]) # properly rotated
plotmap (chondro [,,1003])
# plot spectra matrix
levelplot (spc ~ .wavelength * t, laser, contour = TRUE, col = "#00000080")
# see also plotmat
plotmap (chondro, clusters ~ x * y)
# Voronoi plots
smpl <- sample (chondro, 300)
plotmap (smpl, clusters ~ x * y)
if (require (tripack))
plotvoronoi (smpl, clusters ~ x * y)
if (require (deldir))
plotvoronoi (smpl, clusters ~ x * y,
use.tripack = FALSE)
}
\author{
C. Beleites
}
\seealso{
\code{vignette (plotting)}, \code{vignette
(introduction)}
\code{\link{plot}}
\code{\link[lattice]{levelplot}}
\code{\link{trellis.factor.key}} for improved color
coding of factors
\code{\link[hyperSpec:options]{hyperSpec options}}
\code{\link{spc.identify}} \code{\link{map.sel.poly}}
\code{\link[latticeExtra]{panel.voronoi}}
}
\keyword{hplot}
| /man/levelplot.Rd | no_license | fornasaros/hyperSpec | R | false | false | 5,848 | rd | \name{plotmap}
\alias{levelplot}
\alias{levelplot,formula,hyperSpec-method}
\alias{levelplot,hyperSpec,missing-method}
\alias{map.identify}
\alias{plotmap}
\alias{plotvoronoi}
\title{Plot a Map and Identify/Select Spectra in the Map
\code{\link[lattice]{levelplot}} functions for hyperSpec objects. An image or map of a summary
value of each spectrum is plotted. Spectra may be identified by mouse click.}
\usage{
plotmap(object, model = spc ~ x * y, func = mean,
func.args = list(), ...)
\S4method{levelplot}{hyperSpec,missing}(x, data, ...)
\S4method{levelplot}{formula,hyperSpec}(x, data,
transform.factor = TRUE, ..., contour = FALSE,
useRaster = !contour)
map.identify(object, model = spc ~ x * y,
voronoi = FALSE, ..., tol = 0.02, warn = TRUE)
plotvoronoi(object, model = spc ~ x * y,
use.tripack = FALSE, mix = FALSE, ...)
}
\arguments{
\item{object,data}{the \code{hyperSpec} object}
\item{model,x}{formula specifying the columns of object
that are to be displayed by
\code{\link[lattice]{levelplot}}}
\item{func,func.args}{Before plotting, \code{plotmap}
applies function \code{func} with the arguments given in
the list \code{func.args} to each of the spectra. Thus a
single summary value is displayed for each of the
spectra.
This can be suppressed manually by setting \code{func} to
NULL. It is automatically suppressed if
\code{.wavelength} appears in the formula.}
\item{voronoi}{Should the plot for identifying spectra by
mouse click be produced by \code{plotmap} (default) or
\code{plotvoronoi}?}
\item{\dots}{further arguments are passed down the call
chain, and finally to \code{\link[lattice]{levelplot}}}
\item{transform.factor}{If the color-coded variable is a
factor, should \code{\link{trellis.factor.key}} be used
to compute the color coding and legend?}
\item{contour,useRaster}{see
\code{\link[lattice]{levelplot}}}
\item{tol}{tolerance for \code{map.identify} as fraction
of the viewport (i.e. in "npc" \link[grid]{unit}s)}
\item{warn}{should a warning be issued if no point is
within the specified tolerance? See also details.}
\item{use.tripack}{Whether package tripack should be used
for calculating the voronoi polygons. If \code{FALSE},
package deldir is used instead. See details.}
\item{mix}{For Voronoi plots using package tripack, I
experienced errors if the data was spatially ordered.
Randomly rearranging the rows of the hyperSpec object
circumvents this problem.}
}
\value{
\code{map.identify} returns a vector of row indices into
\code{object} of the clicked points.
The other functions return a lattice object.
}
\description{
The \code{model} can contain the special column name
\code{.wavelength} to specify the wavelength axis.
}
\details{
\code{plotmap}, \code{map.identify}, and the
\code{levelplot} methods internally use the same gateway
function to \code{\link[lattice]{levelplot}}. Thus
\code{transform.factor} can be used with all of them and
the panel function defaults to
\code{\link[lattice]{panel.levelplot.raster}} for all
three. Two special column names, \code{.rownames} and
\code{.wavelength} may be used.
\code{levelplot} plots the spectra matrix.
\code{plotvoronoi} calls \code{plotmap} with different
default settings, namely the panel function defaults to
\code{\link[latticeExtra]{panel.voronoi}}.
\code{\link[latticeExtra]{panel.voronoi}} depends on
either of the packages 'tripack' or 'deldir' being
installed. For further information, please consult the
help page of \code{\link[latticeExtra]{panel.voronoi}}.
On the \code{\link{chondro}} data set, \code{plotmap} is
roughly 5 times faster than \code{plotvoronoi} using
tripack, and ca. 15 times faster than \code{plotvoronoi}
using deldir. Package tripack, however, is free only for
non-commercial use. Also, it seems that certain tripack versions
hang (R running at full CPU power, but not responding nor
finishing the calculation) for certain data sets. In this
case, \code{mix = TRUE} may help.
\code{map.identify} calls \code{plotmap} and
\code{plotvoronoi}, respectively and waits for (left)
mouse clicks on points. Other mouse clicks end the input.
Unlike \code{\link[lattice]{panel.identify}}, the indices
returned by \code{map.identify} are in the same order as
the points were clicked. Also, multiple clicks on the
same point are returned as multiple entries with the same
index.
\code{map.identify} uses option \code{debuglevel} similar
to \code{\link{spc.identify}}: \code{debuglevel == 1}
will plot the tolerance window if no data point was
inside (and additionally labels the point) while
\code{debuglevel == 2} will always plot the tolerance
window.
The \code{map.sel.*} functions offer further interactive
selection, see \code{\link{map.sel.poly}}.
}
\examples{
\dontrun{
vignette (plotting)
vignette (introduction)
}
levelplot (spc ~ y * x, chondro [,,1003]) # properly rotated
plotmap (chondro [,,1003])
# plot spectra matrix
levelplot (spc ~ .wavelength * t, laser, contour = TRUE, col = "#00000080")
# see also plotmat
plotmap (chondro, clusters ~ x * y)
# Voronoi plots
smpl <- sample (chondro, 300)
plotmap (smpl, clusters ~ x * y)
if (require (tripack))
plotvoronoi (smpl, clusters ~ x * y)
if (require (deldir))
plotvoronoi (smpl, clusters ~ x * y,
use.tripack = FALSE)
}
\author{
C. Beleites
}
\seealso{
\code{vignette (plotting)}, \code{vignette
(introduction)}
\code{\link{plot}}
\code{\link[lattice]{levelplot}}
\code{\link{trellis.factor.key}} for improved color
coding of factors
\code{\link[hyperSpec:options]{hyperSpec options}}
\code{\link{spc.identify}} \code{\link{map.sel.poly}}
\code{\link[latticeExtra]{panel.voronoi}}
}
\keyword{hplot}
|
#'
knitr::opts_chunk$set(tidy=TRUE)
#' # `mediate()`
#'
# Evaluating the bare function object makes knitr/spin echo its definition
# in the rendered document.
mediation::mediate
#'
#'\newpage
#' # `polr()`
MASS::polr
# Plotmath expression for the logistic CDF: 1 / (1 + e^(-((x - mu)/s))).
# NOTE(review): the argument `x` is never used -- expression() returns
# unevaluated plotmath, so `x` has no effect; confirm this is intentional.
logis.cdf <- function(x) expression(over(1, 1+e^(-(over(x-mu, s)))))
| /CLASS-CDA-ToDo/syntax/mediations.R | no_license | EccRiley/CLASS-CDA | R | false | false | 184 | r | #'
knitr::opts_chunk$set(tidy=TRUE)
#' # `mediate()`
#'
# Evaluating the bare function object makes knitr/spin echo its definition
# in the rendered document.
mediation::mediate
#'
#'\newpage
#' # `polr()`
MASS::polr
# Plotmath expression for the logistic CDF: 1 / (1 + e^(-((x - mu)/s))).
# NOTE(review): the argument `x` is never used -- expression() returns
# unevaluated plotmath, so `x` has no effect; confirm this is intentional.
logis.cdf <- function(x) expression(over(1, 1+e^(-(over(x-mu, s)))))
|
#dplyr and Git beginner tutorial
#University of Delaware
#Last updated: September 7, 2015
#Ian Rambo
# ____ _ _ _ _ _ _
# | _ \(_) | | | | | | | | |
# | |_) |_ __| | __| | | ___ | | __ _| |__
# | _ <| |/ _` |/ _` | |/ _ \ | | / _` | '_ \
# | |_) | | (_| | (_| | | __/ | |___| (_| | |_) |
# |____/|_|\__,_|\__,_|_|\___| |______\__,_|_.__/
#GitHub repository URL: https://github.com/IMRambo/biddle-lab.git
#==============================================================================
#GOAL: learn how to use dplyr functions for data wrangling.
#Combine dplyr, tidyr, and ggplot functions.
#Collaborate on this script using Git.
#==============================================================================
library(dplyr)
library(ggplot2)
library(grid)
library(tidyr)
library(vegan)
#==============================================================================
#setwd("/Users/imrambo/Documents/Git/biddle-lab/")
#==============================================================================
#hflights
#==============================================================================
#Load the hflights dataset - all flights departing from Houston in 2011
library(hflights)
#Display header and first four rows of hflights
head(hflights, n = 4)
#note that this data frame is in long format
dim(hflights)
str(hflights)
#Carrier names, some real, most fake and/or goofy.
CarrierName <- c("American Airlines", "Lufthansa", "Fly Like a B6",
                 "Colorado Air", "HushHush Air", "Flying Monkey Air",
                 "Uzbekistan Airlines", "US Air", "Windy Air", "Evil Air",
                 "FinnAir", "Soul Plane", "Air Morocco",
                 "Xenophobic International", "Air Yugoslavia")
#Unique values of Houston carrier codes; pair with CarrierName vector
UniqueCarrier <- unique(hflights$UniqueCarrier)
#Create a data frame of character vectors containing "airline names" and
#carrier codes.
Carriers <- data.frame(CarrierName, UniqueCarrier)
# Coerce the columns to character. Fixed: these two statements previously
# ended in a stray trailing comma and an unbalanced closing parenthesis,
# which made the script fail to parse.
Carriers$CarrierName <- as.character(CarrierName)
Carriers$UniqueCarrier <- as.character(UniqueCarrier)
str(Carriers)
#------------------------------------------------------------------------------
#Let's make use of dplyr's %>% operator, which allows us to chain
#operations together.
carrierVelocity <- hflights %>% filter(Cancelled == 0) %>%
rename(DistanceMiles = Distance) %>% #rename variable
left_join(Carriers) %>% #join the current dataframe and Carriers dataframe
#by a common variable.
#mutate() allows you to create new variables, and build new variables off
#those variables.
mutate(
# 1 km = 0.62137 miles, so dividing miles by 0.62137 converts to km
DistanceKm = DistanceMiles/0.62137,
#Average air velocity, Km/hr
AvgAirVelocity = DistanceKm/(AirTime/60),
# %j format gives day of the year
DayOfYear = as.numeric(strftime(paste(Year,
Month,
DayofMonth,
sep = "-"),
format = "%j"))) %>%
#group_by(UniqueCarrier) %>%
#select certain variables
select(DayOfYear, DistanceKm, AvgAirVelocity, UniqueCarrier,
CarrierName)
#------------------------------------------------------------------------------
#dplyr can perform chained operations on data frames without having to save
#the intermediate results
carrierVelocity %>% group_by(DayOfYear, UniqueCarrier,
CarrierName) %>%
#DMAV - Daily Mean Air Velocity - daily mean air velocity (km/hr) for
#all flights of a carrier
#DMAD - Daily Mean Air Distance - daily mean air distance for
#all flights of a carrier
summarise(DMAV = mean(AvgAirVelocity),
DMAD = mean(DistanceKm)) %>%
#Select flights for American Airlines, Soul Plane, and Lufthansa
# NOTE(review): the code/name pairing comes from the order of
# unique(hflights$UniqueCarrier) above, so "AA"/"FL"/"AS" may not actually
# correspond to these three display names -- verify the pairing.
filter(UniqueCarrier == "AA"|
UniqueCarrier == "FL"|
UniqueCarrier == "AS") %>%
#we can pipe right into ggplot2
ggplot(aes(x = DayOfYear, y = DMAV,
#shape = factor(CarrierName),
size = DMAD, color = factor(CarrierName))) +
geom_jitter(alpha = 0.55) +
#scale_shape_manual(name = "Carrier", values = c(15, 17, 19)) +
scale_color_manual(name = "Carrier",
values = c("#009E73", "#56B4E9", "#CC79A7")) +
#geom_density2d(aes(colour = CarrierName), size = 0.5) +
#facet_grid(CarrierName ~ .) +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(color = "black"),
legend.key.size = unit(0.6, "cm")) +
xlab("Day of Year") + ylab("Daily Mean Air Velocity (km/hr)") +
scale_x_continuous(limits = c(1, 365))
#==============================================================================
#BCI
#==============================================================================
#Let's work with community population data - Barro Colorado Island tree counts
#from the vegan package
#Load BCI data from the vegan package
data(BCI, package = "vegan")
dim(BCI)
str(BCI)
#The hflights dataset was in long format, while BCI is in wide format. We can
#use the tidyr package to convert between wide and long.
#Create a character vector containing the 50 sites
BCI$Site <- paste("Site_", 1:50, sep = "")
#Create a spread, e.g. wide data frame from BCI data. Columns are now
#sites instead of species, with rows containing counts for each species.
BCI_spread <- BCI %>% gather(Species, Count,
Abarema.macradenia:Zuelania.guidonia,
convert = TRUE) %>%
spread(Site, Count)
#------------------------------------------------------------------------------
#Perform Nonmetric Multidimensional Scaling for BCI_spread
# (column 1 is the Species name, so ordination runs on columns 2..ncol)
ord <- metaMDS(BCI_spread[, 2:ncol(BCI_spread)],
distance = "bray", autotransform = FALSE, k = 2)
#Extract species scores as numeric vectors
mdsx <- as.numeric(ord$species[, 1])
mdsy <- as.numeric(ord$species[, 2])
#Create a data frame of sites and MDS species
BCI_ord <- data.frame(BCI$Site, mdsx, mdsy) %>%
rename(Site = BCI.Site)
# Shannon diversity per site; the last BCI column (Site) is excluded from
# the count matrix
ShannonH <- diversity(BCI[, 2:(ncol(BCI) - 1)], index = "shannon")
BCI_ord <- cbind(BCI_ord, ShannonH)
BCI_ord$Site <- as.character(BCI_ord$Site)
#------------------------------------------------------------------------------
#Create a tidy data frame of Sites, Species, Counts, MDS species, and ShannonH
BCI_tidy <- BCI %>% gather(Species, Count, Abarema.macradenia:Zuelania.guidonia,
convert = TRUE) %>%
left_join(BCI_ord)
#Filter the tidy data frame for three species, with counts > 0
BCI_tidy %>% filter(Species == "Abarema.macradenia"|
Species == "Theobroma.cacao"|
Species == "Trema.micrantha", Count > 0) %>%
#Create a scatterplot of filtered data
ggplot(aes(x = mdsx, y = mdsy, size = Count,
shape = Species, color = ShannonH)) +
geom_point() + theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(color = "black")) +
scale_color_continuous(low = "darkred", high = "dodgerblue")
| /dplyr_git_tutorial_master.R | no_license | jfbiddle/biddle-lab | R | false | false | 7,232 | r | #dplyr and Git beginner tutorial
#University of Delaware
#Last updated: September 7, 2015
#Ian Rambo
# ____ _ _ _ _ _ _
# | _ \(_) | | | | | | | | |
# | |_) |_ __| | __| | | ___ | | __ _| |__
# | _ <| |/ _` |/ _` | |/ _ \ | | / _` | '_ \
# | |_) | | (_| | (_| | | __/ | |___| (_| | |_) |
# |____/|_|\__,_|\__,_|_|\___| |______\__,_|_.__/
#GitHub repository URL: https://github.com/IMRambo/biddle-lab.git
#==============================================================================
#GOAL: learn how to use dplyr functions for data wrangling.
#Combine dplyr, tidyr, and ggplot functions.
#Collaborate on this script using Git.
#==============================================================================
library(dplyr)
library(ggplot2)
library(grid)
library(tidyr)
library(vegan)
#==============================================================================
#setwd("/Users/imrambo/Documents/Git/biddle-lab/")
#==============================================================================
#hflights
#==============================================================================
#Load the hflights dataset - all flights departing from Houston in 2011
library(hflights)
#Display header and first four rows of hflights
head(hflights, n = 4)
#note that this data frame is in long format
dim(hflights)
str(hflights)
#Carrier names, some real, most fake and/or goofy.
CarrierName <- c("American Airlines", "Lufthansa", "Fly Like a B6",
                 "Colorado Air", "HushHush Air", "Flying Monkey Air",
                 "Uzbekistan Airlines", "US Air", "Windy Air", "Evil Air",
                 "FinnAir", "Soul Plane", "Air Morocco",
                 "Xenophobic International", "Air Yugoslavia")
#Unique values of Houston carrier codes; pair with CarrierName vector
UniqueCarrier <- unique(hflights$UniqueCarrier)
#Create a data frame of character vectors containing "airline names" and
#carrier codes.
Carriers <- data.frame(CarrierName, UniqueCarrier)
# Coerce the columns to character. Fixed: these two statements previously
# ended in a stray trailing comma and an unbalanced closing parenthesis,
# which made the script fail to parse.
Carriers$CarrierName <- as.character(CarrierName)
Carriers$UniqueCarrier <- as.character(UniqueCarrier)
str(Carriers)
#------------------------------------------------------------------------------
#Let's make use of dplyr's %>% operator, which allows us to chain
#operations together.
carrierVelocity <- hflights %>% filter(Cancelled == 0) %>%
rename(DistanceMiles = Distance) %>% #rename variable
left_join(Carriers) %>% #join the current dataframe and Carriers dataframe
#by a common variable.
#mutate() allows you to create new variables, and build new variables off
#those variables.
mutate(
# 1 km = 0.62137 miles, so dividing miles by 0.62137 converts to km
DistanceKm = DistanceMiles/0.62137,
#Average air velocity, Km/hr
AvgAirVelocity = DistanceKm/(AirTime/60),
# %j format gives day of the year
DayOfYear = as.numeric(strftime(paste(Year,
Month,
DayofMonth,
sep = "-"),
format = "%j"))) %>%
#group_by(UniqueCarrier) %>%
#select certain variables
select(DayOfYear, DistanceKm, AvgAirVelocity, UniqueCarrier,
CarrierName)
#------------------------------------------------------------------------------
#dplyr can perform chained operations on data frames without having to save
#the intermediate results
carrierVelocity %>% group_by(DayOfYear, UniqueCarrier,
CarrierName) %>%
#DMAV - Daily Mean Air Velocity - daily mean air velocity (km/hr) for
#all flights of a carrier
#DMAD - Daily Mean Air Distance - daily mean air distance for
#all flights of a carrier
summarise(DMAV = mean(AvgAirVelocity),
DMAD = mean(DistanceKm)) %>%
#Select flights for American Airlines, Soul Plane, and Lufthansa
#NOTE(review): the codes kept below are AA, FL and AS; whether those really
#pair with the names above depends on the order unique(hflights$UniqueCarrier)
#returned the codes -- verify the CarrierName/code pairing.
filter(UniqueCarrier == "AA"|
UniqueCarrier == "FL"|
UniqueCarrier == "AS") %>%
#we can pipe right into ggplot2; point size encodes DMAD, colour the carrier
ggplot(aes(x = DayOfYear, y = DMAV,
#shape = factor(CarrierName),
size = DMAD, color = factor(CarrierName))) +
geom_jitter(alpha = 0.55) +
#scale_shape_manual(name = "Carrier", values = c(15, 17, 19)) +
scale_color_manual(name = "Carrier",
values = c("#009E73", "#56B4E9", "#CC79A7")) +
#geom_density2d(aes(colour = CarrierName), size = 0.5) +
#facet_grid(CarrierName ~ .) +
#strip the default grey theme down to plain black axes
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(color = "black"),
legend.key.size = unit(0.6, "cm")) +
xlab("Day of Year") + ylab("Daily Mean Air Velocity (km/hr)") +
scale_x_continuous(limits = c(1, 365))
#==============================================================================
#BCI
#==============================================================================
#Let's work with community population data - Barro Colorado Island tree counts
#from the vegan package
#Load BCI data from the vegan package
data(BCI, package = "vegan")
dim(BCI)
str(BCI)
#The hflights dataset was in long format, while BCI is in wide format. We can
#use the tidyr package to convert between wide and long.
#Create a character vector containing the 50 sites
BCI$Site <- paste("Site_", 1:50, sep = "")
#Create a spread, e.g. wide data frame from BCI data. Columns are now
#sites instead of species, with rows containing counts for each species.
BCI_spread <- BCI %>% gather(Species, Count,
Abarema.macradenia:Zuelania.guidonia,
convert = TRUE) %>%
spread(Site, Count)
#------------------------------------------------------------------------------
#Perform Nonmetric Multidimensional Scaling for BCI_spread.
#Column 1 of BCI_spread holds the species names, so only the per-site count
#columns 2:ncol enter the ordination.
ord <- metaMDS(BCI_spread[, 2:ncol(BCI_spread)],
distance = "bray", autotransform = FALSE, k = 2)
#Extract species scores as numeric vectors. Because the rows of BCI_spread
#are species, vegan's "species" (column) scores here correspond to the 50
#sites, matching BCI$Site below.
mdsx <- as.numeric(ord$species[, 1])
mdsy <- as.numeric(ord$species[, 2])
#Create a data frame of sites and MDS species
BCI_ord <- data.frame(BCI$Site, mdsx, mdsy) %>%
rename(Site = BCI.Site)
#Shannon diversity per site. Species counts occupy columns 1:(ncol - 1) of
#BCI; the last column is the Site label appended earlier.
#BUG FIX: the previous index 2:(ncol(BCI) - 1) silently dropped the first
#species column (Abarema.macradenia) from the diversity calculation.
ShannonH <- diversity(BCI[, 1:(ncol(BCI) - 1)], index = "shannon")
BCI_ord <- cbind(BCI_ord, ShannonH)
BCI_ord$Site <- as.character(BCI_ord$Site)
#------------------------------------------------------------------------------
#Create a tidy data frame of Sites, Species, Counts, MDS species, and ShannonH
#(left_join matches on the shared "Site" column)
BCI_tidy <- BCI %>% gather(Species, Count, Abarema.macradenia:Zuelania.guidonia,
convert = TRUE) %>%
left_join(BCI_ord)
#Filter the tidy data frame for three species, with counts > 0
BCI_tidy %>% filter(Species == "Abarema.macradenia"|
Species == "Theobroma.cacao"|
Species == "Trema.micrantha", Count > 0) %>%
#Create a scatterplot of filtered data: points at each site's MDS
#coordinates, sized by count, shaped by species, coloured by diversity
ggplot(aes(x = mdsx, y = mdsy, size = Count,
shape = Species, color = ShannonH)) +
geom_point() + theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(color = "black")) +
scale_color_continuous(low = "darkred", high = "dodgerblue")
|
##############################################################################################
# Load the profile and access data ahead of all the tests
#
skip_on_cran()
| /tests/testthat/test-aaaaaaaaaaa.R | no_license | CharlesCara/DatastreamDSWS2R | R | false | false | 172 | r | ##############################################################################################
# Load the profile and access data ahead of all the tests
#
skip_on_cran()
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/print.crm.r
\name{print.crm}
\alias{print.crm}
\title{Print model results}
\usage{
\method{print}{crm}(x,...)
}
\arguments{
\item{x}{crm model result or list of model results}
\item{...}{generic arguments not used here}
}
\value{
prints a simple summary of the model to the screen and returns NULL.
}
\description{
Provides a printed simple summary of the model results.
}
\author{
Jeff Laake
}
\seealso{
\code{\link{crm}}
}
\keyword{utility}
| /marked/man/print.crm.Rd | no_license | bmcclintock/marked | R | false | false | 558 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/print.crm.r
\name{print.crm}
\alias{print.crm}
\title{Print model results}
\usage{
\method{print}{crm}(x,...)
}
\arguments{
\item{x}{crm model result or list of model results}
\item{...}{generic arguments not used here}
}
\value{
prints a simple summary of the model to the screen and returns NULL.
}
\description{
Provides a printed simple summary of the model results.
}
\author{
Jeff Laake
}
\seealso{
\code{\link{crm}}
}
\keyword{utility}
|
## Straightforwardly, apply some function (`transformation`)
## to every terminal node in the stagerunner. This is useful for
## simple debugging and monitoring. For example, if we wish to
## print the variables currently in the context of stagerunner
## prior to executing each stage, we can call
##
## ```r
## runner$transform(function(fn) {
## function(context, ...) {
## print(ls(context))
## fn(context, ...)
## }
## })
## ```
#' Transform the callable's of the terminal nodes of a stageRunner.
#'
#' Every terminal node in a stageRunner is of type stageRunnerNode.
#' These each have a callable, and this method transforms those
#' callables in the way given by the first argument.
#'
#' @name stageRunner_transform
#' @param transformation function. The function which transforms one callable
#' into another.
stageRunner_transform <- function(transformation) {
  # Apply the transformation to every stage; each element's transform()
  # method is invoked for its effect and its return value is discarded,
  # exactly as the original index-based for loop did.
  lapply(self$stages, function(stage) stage$transform(transformation))
  # Return the runner itself so calls can be chained.
  self
}
| /R/stagerunner-transform.R | permissive | syberia/stagerunner | R | false | false | 1,004 | r | ## Straightforwardly, apply some function (`transformation`)
## to every terminal node in the stagerunner. This is useful for
## simple debugging and monitoring. For example, if we wish to
## print the variables currently in the context of stagerunner
## prior to executing each stage, we can call
##
## ```r
## runner$transform(function(fn) {
## function(context, ...) {
## print(ls(context))
## fn(context, ...)
## }
## })
## ```
#' Transform the callable's of the terminal nodes of a stageRunner.
#'
#' Every terminal node in a stageRunner is of type stageRunnerNode.
#' These each have a callable, and this method transforms those
#' callables in the way given by the first argument.
#'
#' @name stageRunner_transform
#' @param transformation function. The function which transforms one callable
#' into another.
stageRunner_transform <- function(transformation) {
for (stage_index in seq_along(self$stages)) {
self$stages[[stage_index]]$transform(transformation)
}
self
}
|
library("DEP")
library("dplyr")
library("EnrichmentBrowser")
library("limma")
library("Biobase")
library("dendextend")
library("RSkittleBrewer")
library("preprocessCore")
library("vsn")
library("broom")
library("snpStats")
library("sva")
library("genefilter")
#Read the tab-separated proteinGroups.txt table
proteinGroups <- read.table(file = "proteinGroups.txt",
header = TRUE,sep = "\t",quote = "\"'",dec = ".",numerals = c("warn.loss"),
row.names = NULL,na.strings = c("NA","NaN","Infinite"))
proteinGroups <- as.data.frame(proteinGroups)
#Filter dataset
#Remove contaminant proteins, reverse (decoy) hits, and proteins only
#identified by a modification site.
proteinGroups <- filter(proteinGroups, Reverse != "+", Potential.contaminant != "+", Only.identified.by.site != "+")
#Keep proteins with 2 or more unique peptides
proteinGroups <- filter(proteinGroups, Unique.peptides.O > 1)
#Keep proteins with more than 2 reporter intensity counts
proteinGroups <- filter(proteinGroups, Reporter.intensity.count.1.O > 2)
## Make row ID unique name
# Are there any duplicated gene names?
proteinGroups$Gene.names %>% duplicated() %>% any()
# Make unique names using the annotation in the "Gene.names" column as primary names and the annotation in "Protein.IDs" as name for those that do not have a gene name.
proteinGroups <- make_unique(proteinGroups, "Gene.names", "Protein.IDs", delim = ";")
rownames(proteinGroups) <- proteinGroups[,"name"]
#Build an ExpressionSet from proteinGroups data.frame
#1 Assay data
#create a minimal ExpressionSet object with just the assay data wanted
assaydata <- data.matrix(proteinGroups[,82:92])
DSM2020_O <- ExpressionSet(assayData=assaydata)
#2 Phenotypic data. Data describing treatment, control, batch, and other covariates.
#Make a phenotype table describing your experiment.
phenotable <- matrix(c("C","D","C","D","C","D","C","D","C","D","C"))
colnames(phenotable) <- c("Treatment")
rownames(phenotable) <- colnames(exprs(DSM2020_O))
phenotable <- as.data.frame(phenotable)
phenotable <- new("AnnotatedDataFrame", data = phenotable)
# Verify row names of phenotable are the same as the column names of the assay data of the expression set.
all(rownames(phenotable)==colnames(exprs(DSM2020_O)))
DSM2020_O <- ExpressionSet(assayData=assaydata, phenoData=phenotable)
#3 Feature data.
featuretable <- proteinGroups[c(1:26,104:114,126,128:147)]
featuretable <- as.data.frame(featuretable)
featuretable <- new("AnnotatedDataFrame", data = featuretable)
# Verify row names of featuretable are the same as the row names of the assay data of the expression set.
all(rownames(featuretable)==rownames(exprs(DSM2020_O)))
DSM2020_O <- ExpressionSet(assayData=assaydata, phenoData=phenotable, featureData=featuretable)
#ExpressionSet is ready
#Exploratory Analysis
trop = RSkittleBrewer("tropical")
palette(trop)
par(pch=19)
#Load dataset
pdata=pData(DSM2020_O)
edata=exprs(DSM2020_O)
fdata = fData(DSM2020_O)
#boxplot to look at log2 distribution of samples
boxplot(log2(edata+1),col=2)
#histograms
par(mfrow=c(1,2))
hist(log2(edata[,1]+1),col=2)
hist(log2(edata[,2]+1),col=2)
par(mfrow=c(1,1))
#density plot
plot(density(log2(edata[,1]+1)),col=2)
lines(density(log2(edata[,2]+1)),col=3)
lines(density(log2(edata[,3]+1)),col=4)
lines(density(log2(edata[,4]+1)),col=5)
lines(density(log2(edata[,5]+1)),col=6)
lines(density(log2(edata[,6]+1)),col=7)
lines(density(log2(edata[,7]+1)),col=8)
lines(density(log2(edata[,8]+1)),col=9)
lines(density(log2(edata[,9]+1)),col=10)
lines(density(log2(edata[,10]+1)),col=11)
lines(density(log2(edata[,11]+1)),col=12)
#qq-plot. Compare distributions of measurements before normalization.
qqplot(log2(edata[,1]+1), log2(edata[,8]+1),col=3)
abline(c(0,1))
#Bland Altman plot
mm = log2(edata[,1]+1) - log2(edata[,4]+1)
aa = log2(edata[,1]+1) + log2(edata[,4]+1)
plot(aa,mm,col=2)
#Heatmap
ematrix = as.matrix(edata)
heatmap(ematrix)
#change up the colors
colramp = colorRampPalette(c(3,"white",2))(9)
heatmap(ematrix,col=colramp)
#No clustering
heatmap(ematrix,col=colramp,Rowv=NA,Colv=NA)
#Clustering
#Log2 transform all edata
edata = log2(edata)
# By default calculates the euclidean distance between rows.
dist1 = dist(t(edata))
#Heatmap of euclidean distance between samples.
colramp = colorRampPalette(c(3,"white",2))(9)
heatmap(as.matrix(dist1),col=colramp,Colv=NA,Rowv=NA)
#hierarchical clustering of samples.
hclust1 = hclust(dist1)
plot(hclust1,hang=-1)
#color the dendrogram
dend = as.dendrogram(hclust1)
dend = color_labels(hclust1,4,col=1:4)
plot(dend)
#Kmeans clustering
kmeans1 = kmeans(edata,centers=3)
names(kmeans1)
#look at the cluster centers
matplot(t(kmeans1$centers),col=1:3,type="l",lwd=3)
#how many belong to each cluster
table(kmeans1$cluster)
#cluster the data together and plot
heatmap(as.matrix(edata)[order(kmeans1$cluster),],col=colramp,Colv=NA,Rowv=NA)
#Dimension reduction
#Mean center rowdata
edata_centered = edata - rowMeans(edata)
#calculate singular vectors
svd1 = svd(edata_centered)
names(svd1)
#Plot d singular values
plot(svd1$d,ylab="Singular value",col=2)
#Plot Percent Variance Explained
plot(svd1$d^2/sum(svd1$d^2),ylab="Percent Variance Explained",col=2)
#Plot top two singular vector decomposition values
par(mfrow=c(1,2))
plot(svd1$v[,1],col=2,ylab="1st PC")
plot(svd1$v[,2],col=2,ylab="2nd PC")
#Plot SV1 vs. SV2
par(mfrow=c(1,1))
plot(svd1$v[,1],svd1$v[,2],col=2,ylab="2nd PC",xlab="1st PC")
plot(svd1$v[,1],svd1$v[,2],ylab="2nd PC",
xlab="1st PC",col=as.numeric(pdata$Treatment))
#Boxplot of SV1 and Treatment
boxplot(svd1$v[,1] ~ pdata$Treatment,border=c(1,2))
points(svd1$v[,1] ~ jitter(as.numeric(pdata$Treatment)),col=as.numeric(pdata$Treatment))
#principal component vs singular vector.
pc1 = prcomp(edata)
plot(pc1$rotation[,1],svd1$v[,1])
#If you subtract the column means insted of the row means they are the same
edata_centered2 = t(t(edata) - colMeans(edata))
svd2 = svd(edata_centered2)
plot(pc1$rotation[,1],svd2$v[,1],col=2)
#Normalization
#Plot density distribution of samples
plot(density(edata[,1]),col=colramp[7],lwd=5,ylim=c(0,.2))
for(i in 2:11){lines(density(edata[,i]),lwd=5,col=colramp[i])}
#Quantile normalization
norm_edata = normalize.quantiles(as.matrix(edata))
plot(density(norm_edata[,1]),col=colramp[1],lwd=5,ylim=c(0,.20))
for(i in 2:11){lines(density(norm_edata[,i]),lwd=5,col=colramp[i+2])}
colnames(norm_edata) <- colnames(edata)
row.names(norm_edata) <- row.names(edata)
#VSN normalization
#vsnnorm_edata = justvsn(as.matrix(2^edata))
#plot(density(vsnnorm_edata[,1]),col=colramp[1],lwd=5,ylim=c(0,0.2))
#for(i in 2:11){lines(density(vsnnorm_edata[,i]),lwd=5,col=colramp[i+2])}
#Matching distributions leaves variability. Note normalization will not remove batch effects.
#plot(norm_edata[1,],col=as.numeric(pdata$Treatment))
#plot(vsnnorm_edata[1,],col=as.numeric(pdata$Treatment))
svd1 = svd(norm_edata - rowMeans(norm_edata))
plot(svd1$v[,1],svd1$v[,2],xlab="PC1",ylab="PC2",
col=as.numeric(pdata$Treatment), pch = 19, cex=2)
#Remove unknown batch variables with surrogate variable analysis.
mod = model.matrix(~Treatment,data=pdata)
mod0 = model.matrix(~1, data=pdata)
sva1 = sva(norm_edata,mod,mod0)
#Add the surrogate variables to the model matrix and perform the model fit.
modsv = cbind(mod,sva1$sv)
fitsv = lm.fit(modsv,t(norm_edata))
#T-statistic
tstats_obj = rowttests(norm_edata,pdata$Treatment)
names(tstats_obj)
hist(tstats_obj$statistic,col=2)
hist(tstats_obj$p.value,col=2)
#statistics with limma: fit the linear model with the surrogate-variable
#design, then apply empirical Bayes moderation
fit_limma = lmFit (norm_edata, modsv)
ebayes_limma = eBayes(fit_limma)
head(ebayes_limma)
names(ebayes_limma)
#BUG FIX: eBayes() results store moderated t-statistics in $t; there is no
#$statistic element, so the old hist(ebayes_limma$statistic) received NULL
#and errored. Plot the TreatmentD column of $t instead.
hist(ebayes_limma$t[,"TreatmentD"],col=2)
hist(ebayes_limma$p.value[,"TreatmentD"],col=2)
#Compare moderated t-stats against the ordinary row t-tests
plot(ebayes_limma$t[,2],-tstats_obj$statistic,col=4,
xlab="Moderated T-stat",ylab="T-stat")
abline(c(0,1),col="darkgrey",lwd=3)
#adjust P-values
qstats_obj <- p.adjust(ebayes_limma$p.value[,"TreatmentD"], method = "BH", length(ebayes_limma$p.value[,"TreatmentD"]))
sum(qstats_obj < 0.05)
#Adjusted p-values from limma.
#limma_pvals_adj = topTable(ebayes_limma,number=dim(edata)[1])$adj.P.Val
#hist(limma_pvals_adj,col=2)
#quantile(limma_pvals_adj)
#Make a proteinGroups object with limma p values, q values, and expression differences
#expdiff: mean of the C samples (columns 1,3,5,7,9,11) minus mean of the D
#samples (columns 2,4,6,8,10), on the log2 scale
expdiff <- as.matrix(rowMeans(norm_edata[,c(1,3,5,7,9,11)])-rowMeans(norm_edata[,c(2,4,6,8,10)]))
colnames(expdiff) <- "log2 fold change"
#BUG FIX: the original code used names(x)[1] <- "..." on these objects,
#which renamed the first *element/row* (clobbering a protein identifier)
#instead of labelling the column. Keep everything as one-column matrices
#and set colnames() so cbind() produces the intended headers.
p.value <- as.matrix(ebayes_limma$p.value[,"TreatmentD"])
colnames(p.value) <- "p value"
qstats_obj <- as.matrix(qstats_obj)
colnames(qstats_obj) <- "q stat"
negLogq <- -log2(qstats_obj)
colnames(negLogq) <- "-log2 q"
stats_table <- cbind( expdiff, p.value, qstats_obj, negLogq)
#Replace the raw reporter intensities with the normalized values before export
proteinGroups[,82:92] <- norm_edata[,1:11]
#merge(by=0) joins on row names, attaching the statistics to each protein
proteinGroups_limma <- merge(proteinGroups, stats_table, by=0)
write.csv(proteinGroups_limma, file ="proteinGroups_limma_vehicle_vs_1uM_bCX.csv")
| /itsar 2/Rwork/KrisWebb/20200424_limma_DSM_SVA_Organoid_Orange_vs_Vehicle.R | no_license | Linux-U/itsar_working | R | false | false | 9,008 | r | library("DEP")
library("dplyr")
library("EnrichmentBrowser")
library("limma")
library("Biobase")
library("dendextend")
library("RSkittleBrewer")
library("preprocessCore")
library("vsn")
library("broom")
library("snpStats")
library("sva")
library("genefilter")
#Read proteinGroups file
proteinGroups <- read.table(file = "proteinGroups.txt",
header = TRUE,sep = "\t",quote = "\"'",dec = ".",numerals = c("warn.loss"),
row.names = NULL,na.strings = c("NA","NaN","Infinite"))
proteinGroups <- as.data.frame(proteinGroups)
#Filter dataset
#Filter for contaminant proteins, reverse hits, and only identifed my site.
proteinGroups <- filter(proteinGroups, Reverse != "+", Potential.contaminant != "+", Only.identified.by.site != "+")
#Filter for protines with 2 or more unique peptides
proteinGroups <- filter(proteinGroups, Unique.peptides.O > 1)
#Filter for protines with 2 or more unique peptides
proteinGroups <- filter(proteinGroups, Reporter.intensity.count.1.O > 2)
## Make row ID unique name
# Are there any duplicated gene names?
proteinGroups$Gene.names %>% duplicated() %>% any()
# Make unique names using the annotation in the "Gene.names" column as primary names and the annotation in "Protein.IDs" as name for those that do not have an gene name.
proteinGroups <- make_unique(proteinGroups, "Gene.names", "Protein.IDs", delim = ";")
rownames(proteinGroups) <- proteinGroups[,"name"]
#Build an ExpressionSet from proteinGroups data.frame
#1 Assay data
#create a minimal ExpressionSet object with just the assay data wanted
assaydata <- data.matrix(proteinGroups[,82:92])
DSM2020_O <- ExpressionSet(assayData=assaydata)
#2 Phenotypic data. Data describing treatment, control, batch, and other covariates.
#Make a phenotype table describing your experiment.
phenotable <- matrix(c("C","D","C","D","C","D","C","D","C","D","C"))
colnames(phenotable) <- c("Treatment")
rownames(phenotable) <- colnames(exprs(DSM2020_O))
phenotable <- as.data.frame(phenotable)
phenotable <- new("AnnotatedDataFrame", data = phenotable)
# Verify row names of phenotable are the same as the column names of the assay data of the expression set.
all(rownames(phenotable)==colnames(exprs(DSM2020_O)))
DSM2020_O <- ExpressionSet(assayData=assaydata, phenoData=phenotable)
#3 Feature data.
featuretable <- proteinGroups[c(1:26,104:114,126,128:147)]
featuretable <- as.data.frame(featuretable)
featuretable <- new("AnnotatedDataFrame", data = featuretable)
# Verify row names of featuretable are the same as the row names of the assay data of the expression set.
all(rownames(featuretable)==rownames(exprs(DSM2020_O)))
DSM2020_O <- ExpressionSet(assayData=assaydata, phenoData=phenotable, featureData=featuretable)
#ExpressionSet is ready
#Exploratory Analysis
trop = RSkittleBrewer("tropical")
palette(trop)
par(pch=19)
#Load dataset
pdata=pData(DSM2020_O)
edata=exprs(DSM2020_O)
fdata = fData(DSM2020_O)
#boxplot to look at log2 distribution of samples
boxplot(log2(edata+1),col=2)
#histograms
par(mfrow=c(1,2))
hist(log2(edata[,1]+1),col=2)
hist(log2(edata[,2]+1),col=2)
par(mfrow=c(1,1))
#density plot
plot(density(log2(edata[,1]+1)),col=2)
lines(density(log2(edata[,2]+1)),col=3)
lines(density(log2(edata[,3]+1)),col=4)
lines(density(log2(edata[,4]+1)),col=5)
lines(density(log2(edata[,5]+1)),col=6)
lines(density(log2(edata[,6]+1)),col=7)
lines(density(log2(edata[,7]+1)),col=8)
lines(density(log2(edata[,8]+1)),col=9)
lines(density(log2(edata[,9]+1)),col=10)
lines(density(log2(edata[,10]+1)),col=11)
lines(density(log2(edata[,11]+1)),col=12)
#qq-plot. Compare distributions of measurements before normalization.
qqplot(log2(edata[,1]+1), log2(edata[,8]+1),col=3)
abline(c(0,1))
#Bland Altman plot
mm = log2(edata[,1]+1) - log2(edata[,4]+1)
aa = log2(edata[,1]+1) + log2(edata[,4]+1)
plot(aa,mm,col=2)
#Heatmap
ematrix = as.matrix(edata)
heatmap(ematrix)
#change up the colors
colramp = colorRampPalette(c(3,"white",2))(9)
heatmap(ematrix,col=colramp)
#No clustering
heatmap(ematrix,col=colramp,Rowv=NA,Colv=NA)
#Clustering
#Log2 transform all edata
edata = log2(edata)
# By default calculates the euclidean distance between rows.
dist1 = dist(t(edata))
#Heatmap of euclidean distance between samples.
colramp = colorRampPalette(c(3,"white",2))(9)
heatmap(as.matrix(dist1),col=colramp,Colv=NA,Rowv=NA)
#hierarchical clustering of samples.
hclust1 = hclust(dist1)
plot(hclust1,hang=-1)
#color the dendrogram
dend = as.dendrogram(hclust1)
dend = color_labels(hclust1,4,col=1:4)
plot(dend)
#Kmeans clustering
kmeans1 = kmeans(edata,centers=3)
names(kmeans1)
#look at the cluster centers
matplot(t(kmeans1$centers),col=1:3,type="l",lwd=3)
#how many belong to each cluster
table(kmeans1$cluster)
#cluster the data together and plot
heatmap(as.matrix(edata)[order(kmeans1$cluster),],col=colramp,Colv=NA,Rowv=NA)
#Dimension reduction
#Mean center rowdata
edata_centered = edata - rowMeans(edata)
#calculate singular vectors
svd1 = svd(edata_centered)
names(svd1)
#Plot d singular values
plot(svd1$d,ylab="Singular value",col=2)
#Plot Percent Variance Explained
plot(svd1$d^2/sum(svd1$d^2),ylab="Percent Variance Explained",col=2)
#Plot top two singular vector decomposition values
par(mfrow=c(1,2))
plot(svd1$v[,1],col=2,ylab="1st PC")
plot(svd1$v[,2],col=2,ylab="2nd PC")
#Plot SV1 vs. SV2
par(mfrow=c(1,1))
plot(svd1$v[,1],svd1$v[,2],col=2,ylab="2nd PC",xlab="1st PC")
plot(svd1$v[,1],svd1$v[,2],ylab="2nd PC",
xlab="1st PC",col=as.numeric(pdata$Treatment))
#Boxplot of SV1 and Treatment
boxplot(svd1$v[,1] ~ pdata$Treatment,border=c(1,2))
points(svd1$v[,1] ~ jitter(as.numeric(pdata$Treatment)),col=as.numeric(pdata$Treatment))
#principal component vs singular vector.
pc1 = prcomp(edata)
plot(pc1$rotation[,1],svd1$v[,1])
#If you subtract the column means insted of the row means they are the same
edata_centered2 = t(t(edata) - colMeans(edata))
svd2 = svd(edata_centered2)
plot(pc1$rotation[,1],svd2$v[,1],col=2)
#Normalization
#Plot density distribution of samples
plot(density(edata[,1]),col=colramp[7],lwd=5,ylim=c(0,.2))
for(i in 2:11){lines(density(edata[,i]),lwd=5,col=colramp[i])}
#Quantile normalization
norm_edata = normalize.quantiles(as.matrix(edata))
plot(density(norm_edata[,1]),col=colramp[1],lwd=5,ylim=c(0,.20))
for(i in 2:11){lines(density(norm_edata[,i]),lwd=5,col=colramp[i+2])}
colnames(norm_edata) <- colnames(edata)
row.names(norm_edata) <- row.names(edata)
#VSN normalization
#vsnnorm_edata = justvsn(as.matrix(2^edata))
#plot(density(vsnnorm_edata[,1]),col=colramp[1],lwd=5,ylim=c(0,0.2))
#for(i in 2:11){lines(density(vsnnorm_edata[,i]),lwd=5,col=colramp[i+2])}
#Matching distributions leaves variability. Note normalization will not remove batch effects.
#plot(norm_edata[1,],col=as.numeric(pdata$Treatment))
#plot(vsnnorm_edata[1,],col=as.numeric(pdata$Treatment))
svd1 = svd(norm_edata - rowMeans(norm_edata))
plot(svd1$v[,1],svd1$v[,2],xlab="PC1",ylab="PC2",
col=as.numeric(pdata$Treatment), pch = 19, cex=2)
#Remove unknown batch variables with surrogate variable analysis.
mod = model.matrix(~Treatment,data=pdata)
mod0 = model.matrix(~1, data=pdata)
sva1 = sva(norm_edata,mod,mod0)
#Add the surrogate variables to the model matrix and perform the model fit.
modsv = cbind(mod,sva1$sv)
fitsv = lm.fit(modsv,t(norm_edata))
#T-statistic
tstats_obj = rowttests(norm_edata,pdata$Treatment)
names(tstats_obj)
hist(tstats_obj$statistic,col=2)
hist(tstats_obj$p.value,col=2)
#statistics with limma: fit the linear model with the surrogate-variable
#design, then apply empirical Bayes moderation
fit_limma = lmFit (norm_edata, modsv)
ebayes_limma = eBayes(fit_limma)
head(ebayes_limma)
names(ebayes_limma)
#BUG FIX: eBayes() results store moderated t-statistics in $t; there is no
#$statistic element, so the old hist(ebayes_limma$statistic) received NULL
#and errored. Plot the TreatmentD column of $t instead.
hist(ebayes_limma$t[,"TreatmentD"],col=2)
hist(ebayes_limma$p.value[,"TreatmentD"],col=2)
#Compare moderated t-stats against the ordinary row t-tests
plot(ebayes_limma$t[,2],-tstats_obj$statistic,col=4,
xlab="Moderated T-stat",ylab="T-stat")
abline(c(0,1),col="darkgrey",lwd=3)
#adjust P-values
qstats_obj <- p.adjust(ebayes_limma$p.value[,"TreatmentD"], method = "BH", length(ebayes_limma$p.value[,"TreatmentD"]))
sum(qstats_obj < 0.05)
#Adjusted p-values from limma.
#limma_pvals_adj = topTable(ebayes_limma,number=dim(edata)[1])$adj.P.Val
#hist(limma_pvals_adj,col=2)
#quantile(limma_pvals_adj)
#Make a proteinGroups object with limma p values, q values, and expression differences
#expdiff: mean of the C samples (columns 1,3,5,7,9,11) minus mean of the D
#samples (columns 2,4,6,8,10), on the log2 scale
expdiff <- as.matrix(rowMeans(norm_edata[,c(1,3,5,7,9,11)])-rowMeans(norm_edata[,c(2,4,6,8,10)]))
colnames(expdiff) <- "log2 fold change"
#BUG FIX: the original code used names(x)[1] <- "..." on these objects,
#which renamed the first *element/row* (clobbering a protein identifier)
#instead of labelling the column. Keep everything as one-column matrices
#and set colnames() so cbind() produces the intended headers.
p.value <- as.matrix(ebayes_limma$p.value[,"TreatmentD"])
colnames(p.value) <- "p value"
qstats_obj <- as.matrix(qstats_obj)
colnames(qstats_obj) <- "q stat"
negLogq <- -log2(qstats_obj)
colnames(negLogq) <- "-log2 q"
stats_table <- cbind( expdiff, p.value, qstats_obj, negLogq)
#Replace the raw reporter intensities with the normalized values before export
proteinGroups[,82:92] <- norm_edata[,1:11]
#merge(by=0) joins on row names, attaching the statistics to each protein
proteinGroups_limma <- merge(proteinGroups, stats_table, by=0)
write.csv(proteinGroups_limma, file ="proteinGroups_limma_vehicle_vs_1uM_bCX.csv")
|
library(snpStats) ## SnpMatrix class
quiet <- suppressWarnings
# genotypeToSnpMatrix on a GT character matrix: phased ("|") and unphased
# ("/") genotype strings must produce identical SnpMatrix codes.
test_gSM_array_GT <- function() {
# 5 variants (rows) x 2 samples (columns); sample 1 phased, sample 2 unphased
mat <- matrix(c(".|.", "0|0", "0|1", "1|0", "1|1",
"./.", "0/0", "0/1", "1/0", "1/1"),
ncol=2, dimnames=list(1:5,1:2))
# Expected SnpMatrix (samples x variants). Pairing with mat above shows the
# raw encoding: missing -> 0, 0/0 -> 1, 0/1 and 1/0 -> 2, 1/1 -> 3.
sm <- new("SnpMatrix",
matrix(as.raw(c(0, 1, 2, 2, 3,
0, 1, 2, 2, 3)),
nrow=2, byrow=TRUE, dimnames=list(1:2,1:5)))
# Single-nucleotide REF and one ALT per variant -> nothing is ignored
ref <- DNAStringSet(rep("A",5))
alt <- DNAStringSetList(DNAStringSet("C"),
DNAStringSet("G"),
DNAStringSet("T"),
DNAStringSet("C"),
DNAStringSet("G"))
# Expected map metadata; ignore=FALSE since every variant is a biallelic SNV
map <- DataFrame(snp.names=rownames(mat),
allele.1=ref,
allele.2=alt,
ignore=rep(FALSE,5))
gtsm <- genotypeToSnpMatrix(mat, ref, alt)
checkIdentical(sm, gtsm$genotypes)
checkIdentical(map, gtsm$map)
}
test_gSM_array_GT_2alt <- function() {
mat <- matrix(c("0|1", "1|0", "1|1",
"1/2", "2/1", "2/2"),
ncol=2, dimnames=list(1:3,1:2))
sm <- new("SnpMatrix",
matrix(as.raw(rep(0,6)),
nrow=2, byrow=TRUE, dimnames=list(1:2,1:3)))
ref <- DNAStringSet(rep("A",3))
alt <- DNAStringSetList(DNAStringSet(c("C","G")),
DNAStringSet(c("G","T")),
DNAStringSet(c("T","C")))
map <- DataFrame(snp.names=rownames(mat),
allele.1=ref,
allele.2=alt,
ignore=rep(TRUE,3))
gtsm <- quiet(genotypeToSnpMatrix(mat, ref, alt))
checkIdentical(sm, gtsm$genotypes)
checkIdentical(map, gtsm$map)
}
test_gSM_array_GT_nonsnv <- function() {
mat <- matrix(c("0|0", "0|1", "1|0",
"0/0", "0/1", "1/0"),
ncol=2, dimnames=list(1:3,1:2))
sm <- new("SnpMatrix",
matrix(as.raw(rep(0,6)),
nrow=2, byrow=TRUE, dimnames=list(1:2,1:3)))
ref <- DNAStringSet(c("A","ACG","ACG"))
alt <- DNAStringSetList(DNAStringSet("CGT"),
DNAStringSet("G"),
DNAStringSet("GAC"))
map <- DataFrame(snp.names=rownames(mat),
allele.1=ref,
allele.2=alt,
ignore=rep(TRUE,3))
gtsm <- quiet(genotypeToSnpMatrix(mat, ref, alt))
checkIdentical(sm, gtsm$genotypes)
checkIdentical(map, gtsm$map)
}
test_gSM_VCF_GL <- function() {
fl <- system.file("extdata", "gl_chr1.vcf", package="VariantAnnotation")
vcf <- readVcf(fl, "hg19")
gtsm <- quiet(genotypeToSnpMatrix(vcf, uncertain=TRUE))
checkIdentical(colnames(vcf), rownames(gtsm$genotypes))
checkIdentical(rownames(vcf), colnames(gtsm$genotypes))
checkIdentical(rownames(vcf), gtsm$map$snp.names)
checkIdentical(ref(vcf), gtsm$map$allele.1)
checkIdentical(alt(vcf), gtsm$map$allele.2)
checkEquals(unlist(GLtoGP(geno(vcf)$GL)[1,4]),
as.vector(g2post(gtsm$genotypes[4,1])))
}
test_gSM_VCF_structural <- function() {
fl <- system.file("extdata", "structural.vcf", package="VariantAnnotation")
vcf <- readVcf(fl, "hg19")
checkIdentical(VariantAnnotation:::.emptySnpMatrix(),
genotypeToSnpMatrix(vcf))
}
test_gSM_VCF_noSamples <- function() {
fl <- system.file("unitTests", "cases",
"FORMAT_header_no_SAMPLEs.vcf",
package="VariantAnnotation")
vcf <- readVcf(fl, "hg19")
gtsm <- quiet(genotypeToSnpMatrix(vcf))
checkEquals(0, nrow(gtsm$genotypes))
}
test_pSM_valid <- function() {
probs <- matrix(c(1,0,0,
0,1,0,
0,0,1,
NA,NA,NA),
ncol=3, byrow=TRUE,
dimnames=list(1:4,c("RR","RA","AA")))
sm <- new("SnpMatrix", matrix(as.raw(c(1,2,3,0)), nrow=1,
dimnames=list(NULL,1:4)))
checkIdentical(sm, probabilityToSnpMatrix(probs))
}
test_pSM_invalid <- function() {
# invalid matrix - probs do not sum to 1
probs <- matrix(c(1,1,0,
0,1,0,
0,0,1,
NA,NA,NA),
ncol=3, byrow=TRUE)
checkException(probabilityToSnpMatrix(probs))
}
test_pSM_onerow <- function() {
probs <- matrix(c(1,0,0,
NA,NA,NA),
ncol=3, byrow=TRUE,
dimnames=list(1:2,c("RR","RA","AA")))
sm <- new("SnpMatrix", matrix(as.raw(c(1,0)), nrow=1,
dimnames=list(NULL,1:2)))
checkIdentical(sm, probabilityToSnpMatrix(probs))
}
test_GLtoGP_array <- function() {
probs <- aperm(array(c(0.4,0.3,0.3,
0.5,0.1,0.4,
0.9,0.05,0.05,
0,1,0,
0,0,1,
1,NA,NA),
dim=c(3,3,2)),
c(2,3,1))
gl <- probs
for (i in 1:nrow(probs)) {
for (j in 1:ncol(probs)) {
gl[i,j,] <- log10(probs[i,j,])
}
}
gp <- GLtoGP(gl)
checkEquals(probs, gp)
}
test_GLtoGP_matrix <- function() {
probs <- matrix(c(list(c(0.4,0.3,0.3)),
list(c(0.5,0.1,0.4)),
list(c(0.9,0.05,0.05)),
list(c(0,1,0)),
list(c(0,0,1)),
list(c(1))),
ncol=2)
gl <- probs
for (i in 1:nrow(probs)) {
for (j in 1:ncol(probs)) {
gl[i,j] <- list(log10(unlist(probs[i,j])))
}
}
gp <- GLtoGP(gl)
checkEquals(probs, gp)
}
test_matrixToArray <- function() {
mat <- matrix(c(list(c(1,2,3)),
list(c(4,5,6)),
list(c(7,8,9)),
list(c(10,11,12)),
list(c(13,14)),
list(c(15))),
ncol=2)
arr <- VariantAnnotation:::.matrixOfListsToArray(mat)
for (i in 1:nrow(mat)) {
for (j in 1:ncol(mat)) {
n <- elementLengths(mat[i,j])
checkEquals(unlist(mat[i,j]), arr[i,j,1:n])
}
}
}
| /VariantAnnotation/inst/unitTests/test_genotypeToSnpMatrix.R | no_license | smgogarten/VCF_projects | R | false | false | 6,425 | r | library(snpStats) ## SnpMatrix class
quiet <- suppressWarnings
test_gSM_array_GT <- function() {
mat <- matrix(c(".|.", "0|0", "0|1", "1|0", "1|1",
"./.", "0/0", "0/1", "1/0", "1/1"),
ncol=2, dimnames=list(1:5,1:2))
sm <- new("SnpMatrix",
matrix(as.raw(c(0, 1, 2, 2, 3,
0, 1, 2, 2, 3)),
nrow=2, byrow=TRUE, dimnames=list(1:2,1:5)))
ref <- DNAStringSet(rep("A",5))
alt <- DNAStringSetList(DNAStringSet("C"),
DNAStringSet("G"),
DNAStringSet("T"),
DNAStringSet("C"),
DNAStringSet("G"))
map <- DataFrame(snp.names=rownames(mat),
allele.1=ref,
allele.2=alt,
ignore=rep(FALSE,5))
gtsm <- genotypeToSnpMatrix(mat, ref, alt)
checkIdentical(sm, gtsm$genotypes)
checkIdentical(map, gtsm$map)
}
test_gSM_array_GT_2alt <- function() {
mat <- matrix(c("0|1", "1|0", "1|1",
"1/2", "2/1", "2/2"),
ncol=2, dimnames=list(1:3,1:2))
sm <- new("SnpMatrix",
matrix(as.raw(rep(0,6)),
nrow=2, byrow=TRUE, dimnames=list(1:2,1:3)))
ref <- DNAStringSet(rep("A",3))
alt <- DNAStringSetList(DNAStringSet(c("C","G")),
DNAStringSet(c("G","T")),
DNAStringSet(c("T","C")))
map <- DataFrame(snp.names=rownames(mat),
allele.1=ref,
allele.2=alt,
ignore=rep(TRUE,3))
gtsm <- quiet(genotypeToSnpMatrix(mat, ref, alt))
checkIdentical(sm, gtsm$genotypes)
checkIdentical(map, gtsm$map)
}
## Non-SNV variants (multi-base REF or ALT, i.e. indels/substitutions)
## are likewise unrepresentable: all genotypes raw 0 and ignore = TRUE,
## with the warning suppressed via quiet().
test_gSM_array_GT_nonsnv <- function() {
    mat <- matrix(c("0|0", "0|1", "1|0",
                    "0/0", "0/1", "1/0"),
                  ncol=2, dimnames=list(1:3,1:2))
    sm <- new("SnpMatrix",
              matrix(as.raw(rep(0,6)),
                     nrow=2, byrow=TRUE, dimnames=list(1:2,1:3)))
    ## multi-base REF/ALT sequences make each variant a non-SNV
    ref <- DNAStringSet(c("A","ACG","ACG"))
    alt <- DNAStringSetList(DNAStringSet("CGT"),
                            DNAStringSet("G"),
                            DNAStringSet("GAC"))
    map <- DataFrame(snp.names=rownames(mat),
                     allele.1=ref,
                     allele.2=alt,
                     ignore=rep(TRUE,3))
    gtsm <- quiet(genotypeToSnpMatrix(mat, ref, alt))
    checkIdentical(sm, gtsm$genotypes)
    checkIdentical(map, gtsm$map)
}
## genotypeToSnpMatrix() on a VCF with GL (genotype likelihood) data and
## uncertain=TRUE: result is transposed relative to the VCF (samples as
## rows, variants as columns), the map mirrors the VCF's ref/alt, and the
## uncertain genotype codes round-trip -- g2post() of a stored genotype
## must equal the posterior computed directly from the GL field.
test_gSM_VCF_GL <- function() {
    fl <- system.file("extdata", "gl_chr1.vcf", package="VariantAnnotation")
    vcf <- readVcf(fl, "hg19")
    gtsm <- quiet(genotypeToSnpMatrix(vcf, uncertain=TRUE))
    ## samples x variants orientation
    checkIdentical(colnames(vcf), rownames(gtsm$genotypes))
    checkIdentical(rownames(vcf), colnames(gtsm$genotypes))
    checkIdentical(rownames(vcf), gtsm$map$snp.names)
    checkIdentical(ref(vcf), gtsm$map$allele.1)
    checkIdentical(alt(vcf), gtsm$map$allele.2)
    ## spot-check one cell: sample 1 / variant 4
    checkEquals(unlist(GLtoGP(geno(vcf)$GL)[1,4]),
                as.vector(g2post(gtsm$genotypes[4,1])))
}
## Structural variants cannot be converted to SNP genotypes, so the
## conversion must return the package's canonical empty SnpMatrix result.
test_gSM_VCF_structural <- function() {
    vcf_file <- system.file("extdata", "structural.vcf",
                            package="VariantAnnotation")
    vcf <- readVcf(vcf_file, "hg19")
    expected <- VariantAnnotation:::.emptySnpMatrix()
    observed <- genotypeToSnpMatrix(vcf)
    checkIdentical(expected, observed)
}
## A VCF with FORMAT header lines but zero sample columns must yield a
## genotype matrix with zero rows (one row per sample).
test_gSM_VCF_noSamples <- function() {
    fl <- system.file("unitTests", "cases",
                      "FORMAT_header_no_SAMPLEs.vcf",
                      package="VariantAnnotation")
    vcf <- readVcf(fl, "hg19")
    gtsm <- quiet(genotypeToSnpMatrix(vcf))
    checkEquals(0, nrow(gtsm$genotypes))
}
## probabilityToSnpMatrix() on a well-formed probability matrix: rows
## putting all mass on RR/RA/AA encode as raw 1/2/3, and an all-NA row
## as raw 0 (missing).
test_pSM_valid <- function() {
    prob_rows <- c(1, 0, 0,
                   0, 1, 0,
                   0, 0, 1,
                   NA, NA, NA)
    probs <- matrix(prob_rows, ncol = 3, byrow = TRUE,
                    dimnames = list(1:4, c("RR", "RA", "AA")))
    expected_codes <- as.raw(c(1, 2, 3, 0))
    sm <- new("SnpMatrix",
              matrix(expected_codes, nrow = 1, dimnames = list(NULL, 1:4)))
    checkIdentical(sm, probabilityToSnpMatrix(probs))
}
## Rows whose probabilities do not sum to 1 are invalid input and must
## raise an exception.
test_pSM_invalid <- function() {
    bad_probs <- matrix(c(1, 1, 0,
                          0, 1, 0,
                          0, 0, 1,
                          NA, NA, NA),
                        ncol = 3, byrow = TRUE)
    checkException(probabilityToSnpMatrix(bad_probs))
}
## Degenerate case: two input rows (one informative, one all-NA) must
## still produce a valid one-sample SnpMatrix with codes 1 and 0.
test_pSM_onerow <- function() {
    probs <- matrix(c(1, 0, 0,
                      NA, NA, NA),
                    ncol = 3, byrow = TRUE,
                    dimnames = list(1:2, c("RR", "RA", "AA")))
    expected <- new("SnpMatrix",
                    matrix(as.raw(c(1, 0)), nrow = 1,
                           dimnames = list(NULL, 1:2)))
    checkIdentical(expected, probabilityToSnpMatrix(probs))
}
## GLtoGP() on a numeric 3-D array must invert log10: feeding it
## log10(probabilities) returns the original probabilities (checkEquals
## tolerance absorbs floating-point error; NA entries pass through).
test_GLtoGP_array <- function() {
    probs <- aperm(array(c(0.4,0.3,0.3,
                           0.5,0.1,0.4,
                           0.9,0.05,0.05,
                           0,1,0,
                           0,0,1,
                           1,NA,NA),
                         dim=c(3,3,2)),
                   c(2,3,1))
    ## build GL = log10(GP) cell by cell
    gl <- probs
    for (i in 1:nrow(probs)) {
        for (j in 1:ncol(probs)) {
            gl[i,j,] <- log10(probs[i,j,])
        }
    }
    gp <- GLtoGP(gl)
    checkEquals(probs, gp)
}
## Same round-trip as test_GLtoGP_array, but for the matrix-of-lists
## representation (ragged per-cell likelihood vectors are allowed).
test_GLtoGP_matrix <- function() {
    probs <- matrix(c(list(c(0.4,0.3,0.3)),
                      list(c(0.5,0.1,0.4)),
                      list(c(0.9,0.05,0.05)),
                      list(c(0,1,0)),
                      list(c(0,0,1)),
                      list(c(1))),
                    ncol=2)
    ## build GL = log10(GP) cell by cell, keeping the list structure
    gl <- probs
    for (i in 1:nrow(probs)) {
        for (j in 1:ncol(probs)) {
            gl[i,j] <- list(log10(unlist(probs[i,j])))
        }
    }
    gp <- GLtoGP(gl)
    checkEquals(probs, gp)
}
## VariantAnnotation:::.matrixOfListsToArray() stacks ragged list cells
## into a 3rd array dimension; each cell's first n slots (n = that cell's
## element length) must reproduce the original vector.
## NOTE(review): elementLengths() is the old S4Vectors name; newer
## releases renamed it elementNROWS() -- confirm the pinned dependency.
test_matrixToArray <- function() {
    mat <- matrix(c(list(c(1,2,3)),
                    list(c(4,5,6)),
                    list(c(7,8,9)),
                    list(c(10,11,12)),
                    list(c(13,14)),
                    list(c(15))),
                  ncol=2)
    arr <- VariantAnnotation:::.matrixOfListsToArray(mat)
    for (i in 1:nrow(mat)) {
        for (j in 1:ncol(mat)) {
            n <- elementLengths(mat[i,j])
            checkEquals(unlist(mat[i,j]), arr[i,j,1:n])
        }
    }
}
|
##### Unit Testing in R: testthat() ####
# To illustrate unit testing in R, we will utilize the testthat package. (You get one guess as to who developed it. No, not me.)
# A part of a code base. Think of a situation where this is written off-the-cuff, without much thought.
# First draft: no input validation at all.
char2int = function(character) { which(letters==character) }
# You told someone else on your development team that you wrote a function that returns an integer for each letter, e.g., “a” returns 1. That person then used test_that() to determine whether your function can be “broken.”
# NOTE(review): require() returns FALSE rather than erroring when the
# package is missing; installing from inside a script is acceptable only
# in teaching material like this.
if ( require("testthat") == FALSE ) {
  install.packages("testthat",repos="https://cloud.r-project.org")
  library(testthat)
}
## Loading required package: testthat
## Error in get(genname, envir = envir) : object 'testthat_print' not found
# The next two tests FAIL on purpose: the naive char2int() accepts bad input.
test_that(desc = "Test for string of length greater than 1",expect_error(char2int("aa")))
## Error: Test failed: 'Test for string of length greater than 1'
## * `char2int("aa")` did not throw an error.
test_that(desc = "Test for improper input (numeric)", expect_error(char2int(1)))
## Error: Test failed: 'Test for improper input (numeric)'
## * `char2int(1)` did not throw an error.
# Second draft: validate the input before the lookup.  Error messages are
# unchanged so the expect_error() tests keep their meaning.
char2int <- function(character) {
  # Reject anything that is not a character vector.
  if (typeof(character) != "character") {
    stop("The input must be a character.")
  }
  # Reject strings longer than a single letter.
  if (nchar(character) > 1) {
    stop("The input character must be of length 1.")
  }
  which(letters == character)
}
# The second draft now passes all four checks:
test_that(desc = "Test for string of length greater than 1",expect_error(char2int("aa")))
test_that(desc = "Test for improper input (numeric)", expect_error(char2int(1)))
test_that(desc = "Test for expected output length (1)", expect_length(char2int("a"),1))
test_that(desc = "Test that output is integer", expect_type(char2int("a"),"integer"))
# ...but upper-case letters and vector input still misbehave, as the
# captured output below shows -- motivating the final version.
char2int("A")
## integer(0)
char2int(c("A","B","C"))
## Warning in if (nchar(character) > 1) stop("The input character must be of length
## 1."): the condition has length > 1 and only the first element will be used
## Warning in letters == character: longer object length is not a multiple of
## shorter object length
## integer(0)
# Final draft: also enforce a length-1 vector and accept upper case.
char2int <- function(character) {
  # Type, vector-length, and string-length guards, in the original order
  # (the order determines which error a given bad input triggers).
  if (typeof(character) != "character") {
    stop("The input must be a character.")
  }
  if (length(character) != 1) {
    stop("The input must be a character vector of length 1.")
  }
  if (nchar(character) > 1) {
    stop("The input character must be of length 1.")
  }
  # Case-insensitive lookup against the built-in letters vector.
  which(letters == tolower(character))
}
# The final draft satisfies every expectation, including upper case and
# vector-length validation:
test_that(desc = "Test for string of length greater than 1", expect_error(char2int("aa")))
test_that(desc = "Test for improper input (numeric)", expect_error(char2int(1)))
test_that(desc = "Test for expected output length (1)", expect_length(char2int("a"),1))
test_that(desc = "Test that output is integer", expect_type(char2int("a"),"integer"))
test_that(desc = "Test that upper-case letters work", expect_equal(char2int("A"),1))
test_that(desc = "Test that the length of the input vector is 1",expect_error(char2int(letters)))
# expect_null(): use this when you have your code return NULL rather than stop when, e.g., a bad input is detected
# expect_silent(): use this when you expect no errors, warnings, or messages
# expect_output(): use this when you want to ensure that output is not returned invisibly or is not NULL | /UnitTesting.R | no_license | rubysheng/stats-R | R | false | false | 3,337 | r | ##### Unit Testing in R: testthat() ####
# (This is a second, identical copy of the testthat walkthrough above.)
# To illustrate unit testing in R, we will utilize the testthat package. (You get one guess as to who developed it. No, not me.)
# A part of a code base. Think of a situation where this is written off-the-cuff, without much thought.
# First draft: no input validation at all.
char2int = function(character) { which(letters==character) }
# You told someone else on your development team that you wrote a function that returns an integer for each letter, e.g., “a” returns 1. That person then used test_that() to determine whether your function can be “broken.”
if ( require("testthat") == FALSE ) {
  install.packages("testthat",repos="https://cloud.r-project.org")
  library(testthat)
}
## Loading required package: testthat
## Error in get(genname, envir = envir) : object 'testthat_print' not found
# These two tests FAIL on purpose: the naive char2int() accepts bad input.
test_that(desc = "Test for string of length greater than 1",expect_error(char2int("aa")))
## Error: Test failed: 'Test for string of length greater than 1'
## * `char2int("aa")` did not throw an error.
test_that(desc = "Test for improper input (numeric)", expect_error(char2int(1)))
## Error: Test failed: 'Test for improper input (numeric)'
## * `char2int(1)` did not throw an error.
# Second draft: validate type and string length before the lookup.
char2int = function(character)
{
  if ( typeof(character) != "character" ) stop("The input must be a character.")
  if ( nchar(character) > 1 ) stop("The input character must be of length 1.")
  which(letters==character)
}
test_that(desc = "Test for string of length greater than 1",expect_error(char2int("aa")))
test_that(desc = "Test for improper input (numeric)", expect_error(char2int(1)))
test_that(desc = "Test for expected output length (1)", expect_length(char2int("a"),1))
test_that(desc = "Test that output is integer", expect_type(char2int("a"),"integer"))
# Upper-case letters and vector input still misbehave:
char2int("A")
## integer(0)
char2int(c("A","B","C"))
## Warning in if (nchar(character) > 1) stop("The input character must be of length
## 1."): the condition has length > 1 and only the first element will be used
## Warning in letters == character: longer object length is not a multiple of
## shorter object length
## integer(0)
# Final draft: also enforce a length-1 vector and accept upper case.
char2int = function(character)
{
  if ( typeof(character) != "character" ) stop("The input must be a character.")
  if ( length(character) != 1 ) stop("The input must be a character vector of length 1.")
  if ( nchar(character) > 1 ) stop("The input character must be of length 1.")
  which(letters==tolower(character))
}
test_that(desc = "Test for string of length greater than 1", expect_error(char2int("aa")))
test_that(desc = "Test for improper input (numeric)", expect_error(char2int(1)))
test_that(desc = "Test for expected output length (1)", expect_length(char2int("a"),1))
test_that(desc = "Test that output is integer", expect_type(char2int("a"),"integer"))
test_that(desc = "Test that upper-case letters work", expect_equal(char2int("A"),1))
test_that(desc = "Test that the length of the input vector is 1",expect_error(char2int(letters)))
# expect_null(): use this when you have your code return NULL rather than stop when, e.g., a bad input is detected
# expect_silent(): use this when you expect no errors, warnings, or messages
# expect_output(): use this when you want to ensure that output is not returned invisibly or is not NULL |
context("Gather")
# Tests for tidyr::gather() (superseded by pivot_longer() in current
# tidyr; kept for the legacy API).
# Basic API ----------------------------------------------------------------
test_that("gather all columns when ... is empty", {
  df <- data.frame(
    x = 1:5,
    y = 6:10
  )
  out <- gather(df, key, val)
  expect_equal(nrow(out), 10)
  expect_equal(names(out), c("key", "val"))
})
test_that("gather returns input if no columns gathered", {
  df <- data.frame(x = 1:2, y = 1:2)
  out <- gather(df, a, b, -x, -y)
  expect_equal(df, out)
})
test_that("if not supply, key and value default to key and value", {
  df <- data.frame(x = 1:2)
  out <- gather(df)
  expect_equal(nrow(out), 2)
  expect_equal(names(out), c("key", "value"))
})
test_that("Missing values removed when na.rm = TRUE", {
  df <- data.frame(x = c(1, NA))
  out <- gather(df, k, v)
  expect_equal(out$v, df$x)
  out <- gather(df, k, v, na.rm = TRUE)
  expect_equal(out$v, 1)
})
test_that("key preserves column ordering", {
  df <- data.frame(y = 1, x = 2)
  out <- gather(df, k, v)
  expect_equal(out$k, factor(c("y", "x"), levels = c("y", "x")))
})
# NOTE(review): dplyr::tbl_df and %>% in the next test reflect the dplyr
# vintage this suite was written against.
test_that("preserve class of input", {
  dat <- data.frame(x = 1:2)
  dat %>% (dplyr::tbl_df) %>% gather %>% expect_is("tbl_df")
  skip_if_not_installed("data.table")
  dat %>% (dplyr::tbl_df) %>% gather %>% expect_is("tbl_df")
})
test_that("additional controls which columns to gather", {
  data <- dplyr::data_frame(a = 1, b1 = 1, b2 = 2, b3 = 3)
  out <- gather(data, key, val, b1:b3)
  expect_equal(names(out), c("a", "key", "val"))
  expect_equal(out$val, 1:3)
})
# Column types ------------------------------------------------------------
test_that("gather throws error for POSIXlt", {
  df <- data.frame(y = 1)
  df$x <- as.POSIXlt(Sys.time())
  expect_error(gather(df, key, val, -x), "a POSIXlt")
})
test_that("factors coerced to characters, not integers", {
  df <- data.frame(
    v1 = 1:3,
    v2 = factor(letters[1:3])
  )
  expect_warning(out <- gather(df, k, v),
    "attributes are not identical across measure variables")
  expect_equal(out$v, c(1:3, letters[1:3]))
})
test_that("attributes of id variables are preserved", {
  df <- data.frame(x = factor(1:3), y = 1:3, z = 3:1)
  out <- gather(df, key, val, -x)
  expect_equal(attributes(df$x), attributes(out$x))
})
test_that("common attributes are preserved", {
  df <- data.frame(date1 = Sys.Date(), date2 = Sys.Date() + 10)
  out <- gather(df, k, v)
  expect_is(out$v, "Date")
})
test_that("varying attributes are dropped with a warning", {
  df <- data.frame(
    date1 = as.POSIXct(Sys.Date()),
    date2 = Sys.Date() + 10
  )
  expect_warning(gather(df, k, v),
    "attributes are not identical across measure variables")
})
# Uses .Internal(inspect()) to read the SEXP OBJECT bit directly.
test_that("gather preserves OBJECT bit on e.g. POSIXct", {
  df <- data.frame(now = Sys.time())
  out <- gather(df, k, v)
  object_bit_set <- function(x) {
    grepl("\\[OBJ", capture.output(.Internal(inspect(x)))[1])
  }
  expect_true(object_bit_set(out$v))
})
test_that("can handle list-columns", {
  df <- dplyr::data_frame(x = 1:2, y = list("a", TRUE))
  out <- gather(df, k, v, -y)
  expect_identical(out$y, df$y)
})
| /tests/testthat/test-gather.R | no_license | yifeizhang/tidyr | R | false | false | 3,010 | r | context("Gather")
# (Second, identical copy of the tidyr::gather() test suite above.)
test_that("gather all columns when ... is empty", {
  df <- data.frame(
    x = 1:5,
    y = 6:10
  )
  out <- gather(df, key, val)
  expect_equal(nrow(out), 10)
  expect_equal(names(out), c("key", "val"))
})
test_that("gather returns input if no columns gathered", {
  df <- data.frame(x = 1:2, y = 1:2)
  out <- gather(df, a, b, -x, -y)
  expect_equal(df, out)
})
test_that("if not supply, key and value default to key and value", {
  df <- data.frame(x = 1:2)
  out <- gather(df)
  expect_equal(nrow(out), 2)
  expect_equal(names(out), c("key", "value"))
})
test_that("Missing values removed when na.rm = TRUE", {
  df <- data.frame(x = c(1, NA))
  out <- gather(df, k, v)
  expect_equal(out$v, df$x)
  out <- gather(df, k, v, na.rm = TRUE)
  expect_equal(out$v, 1)
})
test_that("key preserves column ordering", {
  df <- data.frame(y = 1, x = 2)
  out <- gather(df, k, v)
  expect_equal(out$k, factor(c("y", "x"), levels = c("y", "x")))
})
test_that("preserve class of input", {
  dat <- data.frame(x = 1:2)
  dat %>% (dplyr::tbl_df) %>% gather %>% expect_is("tbl_df")
  skip_if_not_installed("data.table")
  dat %>% (dplyr::tbl_df) %>% gather %>% expect_is("tbl_df")
})
test_that("additional controls which columns to gather", {
  data <- dplyr::data_frame(a = 1, b1 = 1, b2 = 2, b3 = 3)
  out <- gather(data, key, val, b1:b3)
  expect_equal(names(out), c("a", "key", "val"))
  expect_equal(out$val, 1:3)
})
# Column types ------------------------------------------------------------
test_that("gather throws error for POSIXlt", {
  df <- data.frame(y = 1)
  df$x <- as.POSIXlt(Sys.time())
  expect_error(gather(df, key, val, -x), "a POSIXlt")
})
test_that("factors coerced to characters, not integers", {
  df <- data.frame(
    v1 = 1:3,
    v2 = factor(letters[1:3])
  )
  expect_warning(out <- gather(df, k, v),
    "attributes are not identical across measure variables")
  expect_equal(out$v, c(1:3, letters[1:3]))
})
test_that("attributes of id variables are preserved", {
  df <- data.frame(x = factor(1:3), y = 1:3, z = 3:1)
  out <- gather(df, key, val, -x)
  expect_equal(attributes(df$x), attributes(out$x))
})
test_that("common attributes are preserved", {
  df <- data.frame(date1 = Sys.Date(), date2 = Sys.Date() + 10)
  out <- gather(df, k, v)
  expect_is(out$v, "Date")
})
test_that("varying attributes are dropped with a warning", {
  df <- data.frame(
    date1 = as.POSIXct(Sys.Date()),
    date2 = Sys.Date() + 10
  )
  expect_warning(gather(df, k, v),
    "attributes are not identical across measure variables")
})
test_that("gather preserves OBJECT bit on e.g. POSIXct", {
  df <- data.frame(now = Sys.time())
  out <- gather(df, k, v)
  object_bit_set <- function(x) {
    grepl("\\[OBJ", capture.output(.Internal(inspect(x)))[1])
  }
  expect_true(object_bit_set(out$v))
})
test_that("can handle list-columns", {
  df <- dplyr::data_frame(x = 1:2, y = list("a", TRUE))
  out <- gather(df, k, v, -y)
  expect_identical(out$y, df$y)
})
|
library(ivpack)
## Estimate a log-log (power-law) scaling exponent of activity counts vs.
## population, separately for each industry in `mat`.
##
## mat: geo x industry count matrix; melted to long form via get.list()
##      (project helper -- presumably (geo, Industry, Count) triples,
##      confirm) and merged with `pop` on the geo identifier (first
##      column of each).
## th:  minimum number of rows with Count != delta needed to fit a model;
##      below it the industry gets NA estimates.
## delta: count value treated as "no activity" for the threshold test.
## useIv: if TRUE, fit log(Count) ~ log(pop) instrumented by log(pop.iv)
##        via ivreg(); otherwise plain OLS.
## dropMissingIv: drop rows with missing instrument instead of imputing 1.
##
## Returns a data.frame with one row per industry:
## Industry, Beta (slope, 3 dp), r.sq (adjusted), std.err.
scaling = function (mat, pop,th=30,delta=0,useIv=FALSE,dropMissingIv=FALSE)
{
  d = NULL
  mat = as.matrix(mat)
  mat = get.list(mat)
  mat = merge (mat, pop, by.x = 1, by.y = 1)
  if (useIv){
    colnames (mat) = c("geo", "Industry", "Count", "pop",'pop.iv')
    if (dropMissingIv){
      mat <- mat[!is.na(mat$pop.iv),]
    } else {
      ## default: a missing instrument is imputed as population 1
      mat[is.na(mat$pop.iv),"pop.iv"] <- 1
    }
  } else {
    colnames (mat) = c("geo", "Industry", "Count", "pop")
  }
  for(i in unique (mat$Industry)){
    xs = subset(mat, mat$Industry == i)
    if (nrow(xs[xs$Count!=delta,])<th) {
      ## too few active observations: report NA estimates
      beta <- NA
      r.sq <- NA
      std.err <- NA
    } else {
      ## zeros cannot be logged; lm/ivreg drop the resulting NAs
      xs$Count[xs$Count==0] <- NA
      if (useIv) {
        lm <- ivreg(log(xs$Count)~log(xs$pop)|log(xs$pop.iv))
      } else {
        ## NOTE(review): local `lm` shadows stats::lm inside this loop
        lm = lm(log(xs$Count)~log(xs$pop))
      }
      beta = round (summary(lm)$coefficients[2, 1], digits = 3)
      r.sq = summary(lm)$adj.r.squared
      std.err = summary(lm)$coefficients[2, 2]
    }
    econ = unique (xs$Industry)
    d = rbind(d, data.frame(econ, beta, r.sq, std.err))
  }
  colnames (d) = c("Industry", "Beta", "r.sq", "std.err")
  return (d)
}
## Draw a +/- sterr error bar at x = loc on the current plot:
## one vertical bar plus top and bottom whisker caps of width wiskwidth.
plotsegraph <- function(loc, value, sterr, wiskwidth, color = "grey", linewidth = 2) {
    half <- wiskwidth/2
    lower <- value - sterr
    upper <- value + sterr
    ## vertical bar
    segments(x0 = loc, x1 = loc, y0 = lower, y1 = upper, col = color,
             lwd = linewidth)
    ## upper whisker cap
    segments(x0 = loc - half, x1 = loc + half, y0 = upper, y1 = upper,
             col = color, lwd = linewidth)
    ## lower whisker cap
    segments(x0 = loc - half, x1 = loc + half, y0 = lower, y1 = lower,
             col = color, lwd = linewidth)
}
## Sum the 5-point percentile-bucket columns covering the BOTTOM `th`
## percent of `df`.  Columns are named like "top95pc.cl" / "top95pc.pat";
## the suffix is chosen by `use` ("claim" -> claim counts, anything else
## -> patent counts).  `delta` is added to the final total.
bottomQ <- function(df,th,use,delta) {
    suffix <- if (use == 'claim') 'cl' else 'pat'
    start <- 100 - th + 5
    total <- df[, paste0('top', start, 'pc.', suffix)]
    if (start + 5 <= 100) {
        for (pct in seq(start + 5, 100, 5)) {
            total <- total + df[, paste0('top', pct, 'pc.', suffix)]
        }
    }
    total + delta
}
## Sum the 5-point percentile-bucket columns covering the TOP `th`
## percent of `df` (mirror of bottomQ): always starts from "top5pc" and
## accumulates buckets up to "top<th>pc".  `delta` is added at the end.
topQ <- function(df,th,use,delta) {
    suffix <- if (use == 'claim') 'cl' else 'pat'
    total <- df[, paste0('top', 5, 'pc.', suffix)]
    if (th >= 10) {
        for (pct in seq(10, th, 5)) {
            total <- total + df[, paste0('top', pct, 'pc.', suffix)]
        }
    }
    total + delta
}
## Run scaling() once per decade: subset `df` by `dec`, build a
## CBSA x decade count matrix from `outcol` via get.matrix() (project
## helper), pair it with the unique CBSA populations (`newpop`), and
## stack the per-decade results.  Output columns get a .pat suffix.
scalingDec <- function(df,outcol){
    d = NULL
    df = df[complete.cases(df),]
    for (i in unique (df$dec)) {
        ## NOTE(review): `c` shadows base::c as a data value here; the
        ## c(...) calls below still resolve to the function.
        c = subset (df, df$dec == i)
        m = get.matrix(c[, c("CBSA", "dec",outcol)])
        p = unique(c[, c("CBSA", "newpop")])
        scal = scaling(m, p)
        d = rbind (d, scal)
    }
    d = d[order(d$Industry),]
    colnames (d) = c("Decade", "Beta.pat", "r.sq.pat", "std.err.pat")
    return(d)
}
## Like scalingDec(), but grouped by `ID`.  The resulting Industry labels
## are split back out: first 4 characters = decade, the remainder (from
## position 6 on) = category name.
scalingDecByCat <- function(df,use){
    d = NULL
    for (i in unique (df$ID)) {
        c = subset (df, df$ID == i)
        ## count column is chosen dynamically, e.g. use="pat" -> "pat.count"
        m = get.matrix(c[, c("CBSA", "ID",paste0(use,".count"))])
        p = unique(c[, c("CBSA", "newpop")])
        scal = scaling(m, p)
        d = rbind (d, scal)
    }
    d = d[order(d$Industry),]
    d$Decade = substr(d$Industry, 0, 4)
    d$Cat = trimws(substr(d$Industry, 6, 200))
    d = d[, c("Cat", "Decade", "Beta", "r.sq", "std.err")]
    colnames (d) = c("Cat", "Decade", "Beta.Pat", "r.sq.Pat", "std.err.Pat")
    return(d)
}
| /2.Functions/crunching.R | no_license | crisjf/comp-scaling | R | false | false | 3,283 | r | library(ivpack)
## (Second, identical copy of the crunching.R helpers above.)
## scaling(): per-industry log-log scaling exponent of counts vs.
## population; OLS by default, ivreg() instrumental variables when
## useIv=TRUE; industries with fewer than `th` rows where Count != delta
## get NA estimates.
scaling = function (mat, pop,th=30,delta=0,useIv=FALSE,dropMissingIv=FALSE)
{
  d = NULL
  mat = as.matrix(mat)
  mat = get.list(mat)
  mat = merge (mat, pop, by.x = 1, by.y = 1)
  if (useIv){
    colnames (mat) = c("geo", "Industry", "Count", "pop",'pop.iv')
    if (dropMissingIv){
      mat <- mat[!is.na(mat$pop.iv),]
    } else {
      mat[is.na(mat$pop.iv),"pop.iv"] <- 1
    }
  } else {
    colnames (mat) = c("geo", "Industry", "Count", "pop")
  }
  for(i in unique (mat$Industry)){
    xs = subset(mat, mat$Industry == i)
    if (nrow(xs[xs$Count!=delta,])<th) {
      beta <- NA
      r.sq <- NA
      std.err <- NA
    } else {
      ## zeros cannot be logged; lm/ivreg drop the resulting NAs
      xs$Count[xs$Count==0] <- NA
      if (useIv) {
        lm <- ivreg(log(xs$Count)~log(xs$pop)|log(xs$pop.iv))
      } else {
        lm = lm(log(xs$Count)~log(xs$pop))
      }
      beta = round (summary(lm)$coefficients[2, 1], digits = 3)
      r.sq = summary(lm)$adj.r.squared
      std.err = summary(lm)$coefficients[2, 2]
    }
    econ = unique (xs$Industry)
    d = rbind(d, data.frame(econ, beta, r.sq, std.err))
  }
  colnames (d) = c("Industry", "Beta", "r.sq", "std.err")
  return (d)
}
## plotsegraph(): draw a +/- sterr error bar (vertical bar + whisker caps).
plotsegraph <- function(loc, value, sterr, wiskwidth, color = "grey", linewidth = 2) {
    w <- wiskwidth/2
    segments(x0 = loc, x1 = loc, y0 = value - sterr, y1 = value + sterr, col = color,
             lwd = linewidth)
    segments(x0 = loc - w, x1 = loc + w, y0 = value + sterr, y1 = value + sterr,
             col = color, lwd = linewidth)  # upper whiskers
    segments(x0 = loc - w, x1 = loc + w, y0 = value - sterr, y1 = value - sterr,
             col = color, lwd = linewidth)  # lower whiskers
}
## bottomQ(): sum the percentile-bucket columns ("top<NN>pc.cl"/".pat")
## covering the bottom `th` percent, plus `delta`.
bottomQ <- function(df,th,use,delta) {
    if (use=='claim'){
        useLocal = 'cl'
    } else {
        useLocal = 'pat'
    }
    bottom <- df[,paste0('top',100-th+5,'pc.',useLocal)]
    if (100-th+10<=100) {
        for (i in seq(100-th+10,100,5)){
            col <- paste0('top',i,'pc.',useLocal)
            bottom = bottom+df[,col]
        }
    }
    bottom <- bottom+delta
    return(bottom)
}
## topQ(): mirror of bottomQ for the top `th` percent (buckets 5..th).
topQ <- function(df,th,use,delta) {
    if (use=='claim'){
        useLocal = 'cl'
    } else {
        useLocal = 'pat'
    }
    top <- df[,paste0('top',5,'pc.',useLocal)]
    if (th>=10) {
        for (i in seq(10,th,5)){
            col <- paste0('top',i,'pc.',useLocal)
            top = top+df[,col]
        }
    }
    top <- top+delta
    return(top)
}
## scalingDec(): run scaling() per decade (`dec`) with columns renamed
## to a .pat suffix.
scalingDec <- function(df,outcol){
    d = NULL
    df = df[complete.cases(df),]
    for (i in unique (df$dec)) {
        c = subset (df, df$dec == i)
        m = get.matrix(c[, c("CBSA", "dec",outcol)])
        p = unique(c[, c("CBSA", "newpop")])
        scal = scaling(m, p)
        d = rbind (d, scal)
    }
    d = d[order(d$Industry),]
    colnames (d) = c("Decade", "Beta.pat", "r.sq.pat", "std.err.pat")
    return(d)
}
## scalingDecByCat(): run scaling() per `ID`; the Industry label encodes
## "<decade> <category>" and is split back out afterwards.
scalingDecByCat <- function(df,use){
    d = NULL
    for (i in unique (df$ID)) {
        c = subset (df, df$ID == i)
        m = get.matrix(c[, c("CBSA", "ID",paste0(use,".count"))])
        p = unique(c[, c("CBSA", "newpop")])
        scal = scaling(m, p)
        d = rbind (d, scal)
    }
    d = d[order(d$Industry),]
    d$Decade = substr(d$Industry, 0, 4)
    d$Cat = trimws(substr(d$Industry, 6, 200))
    d = d[, c("Cat", "Decade", "Beta", "r.sq", "std.err")]
    colnames (d) = c("Cat", "Decade", "Beta.Pat", "r.sq.Pat", "std.err.Pat")
    return(d)
}
|
# DATA READIN -------------------------------------------------------------
# Load the Kaggle wine-review dump, blank out empty strings, drop
# incomplete rows, and keep only eight well-known grape varieties.
dataset = read.csv("winemag-data-130k-v2.csv")
dataset[dataset==""]=NA          # treat empty strings as missing
dataset = na.omit(dataset)       # keep complete rows only
# Drop columns 1, 6, 7 and 8 by position.
# NOTE(review): the grepl() filters below assume column 5 holds the grape
# variety after this drop, and the later `dataset$province` call assumes
# `province` survives it -- verify both against the CSV's column order.
dataset=dataset[,-c(1,6,7,8)]
choiceWines = c("Pinot Gris","Pinot Noir","Cabernet Sauvignon","Chardonnay","Malbec","Zinfandel","Riesling","Merlot")
dataset = dataset[grepl(paste(choiceWines,collapse="|"),
                        dataset[,5]),]
# Remove look-alike varieties and blends pulled in by substring matching
# (e.g. "Cabernet Franc", and mis-encoded names containing "Ã").
dataset = dataset[!grepl("Franc",dataset[,5]),]
dataset = dataset[!grepl("Ã",dataset[,5]),]
omitWines = c("Cabernet Merlot","Viognier","Merlot-Cabernet","Blanc","Semillon","Syrah","Johannisberg","Sangiovese",
              "Shiraz","Barbera","Tempranillo","Merlot-Malbec","Tannat","Chardonnay-Riesling","White","Pinot-Chardonnay",
              "Malbec-Cabernet","Malbec-Merlot","Sauvignon-Malbec","Riesling-Chardonnay","Sauvignon-Merlot")
dataset = dataset[!grepl(paste(omitWines,collapse="|"),dataset[,5]),]
dataset$province = droplevels(dataset$province)   # drop unused factor levels
# VARIABLE CREATION -------------------------------------------------------
# One 0/1 dummy per tasting term: 1 for every row of `dataset` whose first
# column mentions the term (case-insensitive substring match), else 0.
# Replaces the original copy-pasted for/if/grepl loops (which iterated
# 1:length(...)) with a single vectorized helper; values are identical.
# NOTE(review): the original loops tested `dataset[i,1]` -- presumably the
# review text lives in column 1 after the DATA READIN column drop; confirm.

# flag_term: vectorized 0/1 indicator for `term` appearing in column 1.
flag_term <- function(term) {
  as.numeric(grepl(term, dataset[, 1], ignore.case = TRUE))
}

dry            <- flag_term("dry")
tannic         <- flag_term("tannic")
soft           <- flag_term("soft")
cherry         <- flag_term("cherry")
stiff          <- flag_term("stiff")
thick          <- flag_term("thick")
creamy         <- flag_term("creamy")
tart           <- flag_term("tart")
bitter         <- flag_term("bitter")
berry          <- flag_term("berry")   # substring match: also hits blackberry/raspberry, as before
red            <- flag_term("red")
white          <- flag_term("white")
peach          <- flag_term("peach")
deep           <- flag_term("deep")
oak            <- flag_term("oak")
smoky          <- flag_term("smoky")
rich           <- flag_term("rich")
vanilla        <- flag_term("vanilla")
medium         <- flag_term("medium")
pepper         <- flag_term("pepper")
plum           <- flag_term("plum")
crisp          <- flag_term("crisp")
sweet          <- flag_term("sweet")
dark           <- flag_term("dark")
fullBodied     <- flag_term("full-bodied")
fullBodied_s   <- flag_term("full bodied")
mediumBodied   <- flag_term("medium-bodied")
mediumBodied_s <- flag_term("medium bodied")
refreshing     <- flag_term("refreshing")
spice          <- flag_term("spice")
savory         <- flag_term("savory")
round          <- flag_term("round")
orange         <- flag_term("orange")
smooth         <- flag_term("smooth")
bright         <- flag_term("bright")
spicy          <- flag_term("spicy")
stone          <- flag_term("stone")
licorice       <- flag_term("licorice")
lively         <- flag_term("lively")
fruity         <- flag_term("fruity")
mineral        <- flag_term("mineral")
cherries       <- flag_term("cherries")
chalky         <- flag_term("chalky")
dense          <- flag_term("dense")
raspberry      <- flag_term("raspberry")
graphite       <- flag_term("graphite")
toast          <- flag_term("toast")
tobacco        <- flag_term("tobacco")
structured     <- flag_term("structured")
black          <- flag_term("black")
balanced       <- flag_term("balanced")
balsamic       <- flag_term("balsamic")
cool           <- flag_term("cool")
jam            <- flag_term("jam")
# The original computed `cinnamon` three times identically; once suffices.
cinnamon       <- flag_term("cinnamon")
tang           <- flag_term("tang")
coffee         <- flag_term("coffee")
# TESTED WITH 33% error
blackberry     <- flag_term("blackberry")
roast          <- flag_term("roast")
firm           <- flag_term("firm")
cedar          <- flag_term("cedar")
beef           <- flag_term("beef")
complex        <- flag_term("complex")
cranberry      <- flag_term("cranberry")
caramel        <- flag_term("caramel")
mocha          <- flag_term("mocha")
# BUG FIX: the original ROSE loop assigned mocha[i] = 1 on a "rose" match,
# leaving `rose` all zeros and contaminating `mocha`; both are now correct.
rose           <- flag_term("rose")
intense        <- flag_term("intense")
concentrated   <- flag_term("concentrated")
# BUG FIX: the original EARTHY loop read `taset[i,1]` (typo for `dataset`),
# which aborted the script with an object-not-found error.
earthy         <- flag_term("earthy")
strong         <- flag_term("strong")
sour           <- flag_term("sour")
meat           <- flag_term("meat")
cassis         <- flag_term("cassis")
sharp          <- flag_term("sharp")
vintage        <- flag_term("vintage")
# BLACK CHERRY
bcf = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("black cherry",dataset[i,1],ignore.case=TRUE)==TRUE)
{
bcf[i]=1
}
}
# FRENCH OAK
nfo = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("french oak",dataset[i,1],ignore.case=TRUE)==TRUE)
{
nfo[i]=1
}
}
# FULL BODIED
fbw = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("full bodied",dataset[i,1],ignore.case=TRUE)==TRUE)
{
fbw[i]=1
}
}
# MEDIUM BODIED
mbw = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("medium bodied",dataset[i,1],ignore.case=TRUE)==TRUE)
{
mbw[i]=1
}
}
# FRUIT FLAVORS
tff = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("fruit flavors",dataset[i,1],ignore.case=TRUE)==TRUE)
{
tff[i]=1
}
}
# SWEET SMOKY OAK
sso = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("sweet smoky oak",dataset[i,1],ignore.case=TRUE)==TRUE)
{
sso[i]=1
}
}
# STONE FRUIT FLAVORS
sff = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("stone fruit flavors",dataset[i,1],ignore.case=TRUE)==TRUE)
{
sff[i]=1
}
}
# FINE GRAINED TANNINS
fgt = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("fine grained tannins",dataset[i,1],ignore.case=TRUE)==TRUE)
{
fgt[i]=1
}
}
# DARK FRUIT FLAVORS
dff = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("dark fruit flavors",dataset[i,1],ignore.case=TRUE)==TRUE)
{
dff[i]=1
}
}
# BLACK FRUIT FLAVORS
bff = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("black fruit flavors",dataset[i,1],ignore.case=TRUE)==TRUE)
{
bff[i]=1
}
}
# LIGHT IN COLOR
lic = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("light in color",dataset[i,1],ignore.case=TRUE)==TRUE)
{
lic[i]=1
}
}
# HIGH IN ALCOHOL
hia = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("high in alcohol",dataset[i,1],ignore.case=TRUE)==TRUE)
{
hia[i]=1
}
}
# RUSSIAN RIVER VALLEY
rrv = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("russian river valley",dataset[i,1],ignore.case=TRUE)==TRUE)
{
rrv[i]=1
}
}
# BUTTERED TOAST
bt = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("buttered toast",dataset[i,1],ignore.case=TRUE)==TRUE)
{
bt[i]=1
}
}
# BLACK PEPPER
bp = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("black pepper",dataset[i,1],ignore.case=TRUE)==TRUE)
{
bp[i]=1
}
}
# RED CHERRY
rc = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("red cherry",dataset[i,1],ignore.case=TRUE)==TRUE)
{
rc[i]=1
}
}
# CRISP ACIDITY
ca = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("crisp acidity",dataset[i,1],ignore.case=TRUE)==TRUE)
{
ca[i]=1
}
}
# FIRM TANNINS
ft = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("firm tannins",dataset[i,1],ignore.case=TRUE)==TRUE)
{
ft[i]=1
}
}
# LIGHT BODIED
lbodied = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("light bodied",dataset[i,1],ignore.case=TRUE)==TRUE)
{
lbodied[i]=1
}
}
# BLACK PLUM
bplum = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("black plum",dataset[i,1],ignore.case=TRUE)==TRUE)
{
bplum[i]=1
}
}
# DRY
bonedry = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("dry",dataset[i,1],ignore.case=TRUE)==TRUE)
{
bonedry[i]=1
}
}
# BLACK CHERRIES
blkcherries = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("black cherries",dataset[i,1],ignore.case=TRUE)==TRUE)
{
blkcherries[i]=1
}
}
# CHERRY FLAVORS
chflavors = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("cherry flavors",dataset[i,1],ignore.case=TRUE)==TRUE)
{
bt[i]=1
}
}
# DARK CHOCOLATE
drkchocolate = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("dark chocolate",dataset[i,1],ignore.case=TRUE)==TRUE)
{
drkchocolate[i]=1
}
}
# BLACK CURRANT
blkcurrant = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("black currant",dataset[i,1],ignore.case=TRUE)==TRUE)
{
blkcurrant[i]=1
}
}
# OAK FLAVORS
oakflavors = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("oak flavors",dataset[i,1],ignore.case=TRUE)==TRUE)
{
oakflavors[i]=1
}
}
# SPICE FLAVORS
spflavors = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("spice flavors",dataset[i,1],ignore.case=TRUE)==TRUE)
{
spflavors[i]=1
}
}
# GREEN APPLE
greenapple = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("green apple",dataset[i,1],ignore.case=TRUE)==TRUE)
{
greenapple[i]=1
}
}
# TROPICAL FRUIT
trpfruit = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("tropical fruit",dataset[i,1],ignore.case=TRUE)==TRUE)
{
trpfruit[i]=1
}
}
# RED CURRANT
redcurrant = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("red currant",dataset[i,1],ignore.case=TRUE)==TRUE)
{
redcurrant[i]=1
}
}
# PETIT VERDOT
petitverdot = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("petit verdot",dataset[i,1],ignore.case=TRUE)==TRUE)
{
petitverdot[i]=1
}
}
# HIGH TONED
hightoned = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("high toned",dataset[i,1],ignore.case=TRUE)==TRUE)
{
hightoned[i]=1
}
}
# VARIETAL WINE
varietalwine = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("varietal wine",dataset[i,1],ignore.case=TRUE)==TRUE)
{
varietalwine[i]=1
}
}
# WHITE PEPPER
whitepepper = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("white pepper",dataset[i,1],ignore.case=TRUE)==TRUE)
{
whitepepper[i]=1
}
}
# BRISK ACITITY
briskacidity = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("brisk acidity",dataset[i,1],ignore.case=TRUE)==TRUE)
{
briskacidity[i]=1
}
}
# BRIGHT ACIDITY
brightacidity = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("bright acidity",dataset[i,1],ignore.case=TRUE)==TRUE)
{
brightacidity[i]=1
}
}
# BLACK LICORICE
blacklicorice = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("black licorice",dataset[i,1],ignore.case=TRUE)==TRUE)
{
blacklicorice[i]=1
}
}
# BARREL SPICE
barrelspice = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("barrel spice",dataset[i,1],ignore.case=TRUE)==TRUE)
{
barrelspice[i]=1
}
}
# GRAINED TANNINS
grainedtannins = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("grained tannins",dataset[i,1],ignore.case=TRUE)==TRUE)
{
grainedtannins[i]=1
}
}
# SMOOTH TANNINS
smoothtannins = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("smooth tannins",dataset[i,1],ignore.case=TRUE)==TRUE)
{
smoothtannins[i]=1
}
}
# 100% VARIETAL
varietal = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("100% varietal",dataset[i,1],ignore.case=TRUE)==TRUE)
{
varietal[i]=1
}
}
# SWEET
sweet = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("sweet",dataset[i,1],ignore.case=TRUE)==TRUE)
{
sweet[i]=1
}
}
# DARK RED
darkred = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("dark red",dataset[i,1],ignore.case=TRUE)==TRUE)
{
darkred[i]=1
}
}
# BLACK TEA
blacktea = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("black tea",dataset[i,1],ignore.case=TRUE)==TRUE)
{
blacktea[i]=1
}
}
# FOREST FLOOR
forestfloor = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("forest floor",dataset[i,1],ignore.case=TRUE)==TRUE)
{
forestfloor[i]=1
}
}
# ROSE PETALS
rosepetals = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("rose petals",dataset[i,1],ignore.case=TRUE)==TRUE)
{
rosepetals[i]=1
}
}
# SILKY TEXTURE
silkytexture = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("silky texture",dataset[i,1],ignore.case=TRUE)==TRUE)
{
silkytexture[i]=1
}
}
# SOUR CHERRY
sourcherry = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("sour cherry",dataset[i,1],ignore.case=TRUE)==TRUE)
{
sourcherry[i]=1
}
}
# CABERNET SAUVIGNON
cabernet = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("cabernet sauvignon",dataset[i,1],ignore.case=TRUE)==TRUE)
{
cabernet[i]=1
}
}
# Chardonnay
chardonnay = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("chardonnay",dataset[i,1],ignore.case=TRUE)==TRUE)
{
chardonnay[i]=1
}
}
# MALBEC
malbec = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("malbec",dataset[i,1],ignore.case=TRUE)==TRUE)
{
malbec[i]=1
}
}
# MERLOT
merlot = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("merlot",dataset[i,1],ignore.case=TRUE)==TRUE)
{
merlot[i]=1
}
}
# PINOT GRIS
pinotgris = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("pinot gris",dataset[i,1],ignore.case=TRUE)==TRUE)
{
pinotgris[i]=1
}
}
# PINOT NOIR
pinotnoir = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("pinot noir",dataset[i,1],ignore.case=TRUE)==TRUE)
{
pinotnoir[i]=1
}
}
# RIESLING
riesling = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("riesling",dataset[i,1],ignore.case=TRUE)==TRUE)
{
riesling[i]=1
}
}
# ZINFANDEL
zinfandel = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("zinfandel",dataset[i,1],ignore.case=TRUE)==TRUE)
{
zinfandel[i]=1
}
}
# Add Almond, Apple, Apricot,
# berries, blueberry, candied,
# chocolate,
# clean, cocoa,
# cranberry, currant,
# easy, elegant, espresso, flower, fragrant,
# grapefruit, green,
# grilled, herb, honey,leather, lemon,
#
# racy(21),
#
# silky(25),
#
# subtle(23), supple(24),
# thyme(21),
# tropical(30), velvety(21), vibrant(36),
#
# UPDATED DATASET ---------------------------------------------------------
# Assemble the model matrix: variety label (as factor), score, price and
# province, followed by every keyword indicator column, then persist it so
# the h2o sections below can import it from disk.
fullData <- cbind.data.frame(
  factor(dataset[, 5]), dataset[, 2], dataset[, 3], dataset[, 4],
  dry, tannic, soft, cherry, stiff, thick,
  creamy, tart, bitter, berry, red, white, peach, deep, oak,
  smoky, rich, vanilla, medium, pepper, plum, crisp, sweet, dark,
  fullBodied, fullBodied_s, mediumBodied, mediumBodied_s, refreshing, spice,
  savory, round, orange, smooth, bright, spicy, stone, licorice, lively, fruity,
  mineral, cherries, chalky, dense, raspberry, graphite,
  toast, tobacco, structured, black, balanced, balsamic, cool, cinnamon,
  tang, jam, coffee, blackberry, roast, firm, cedar, beef, complex, cranberry,
  caramel, mocha, rose, intense, concentrated, earthy, strong, sour, meat,
  cassis, sharp, vintage, bcf, nfo, fbw, mbw, tff, sso, sff, fgt, dff,
  bff, lic, hia, rrv, bt, bp, rc, ca, ft, lbodied, bplum, bonedry, blkcherries, drkchocolate,
  blkcurrant, oakflavors, spflavors, greenapple, trpfruit, redcurrant, petitverdot,
  hightoned, varietalwine, whitepepper, briskacidity, brightacidity, blacklicorice,
  barrelspice, grainedtannins, smoothtannins, cabernet, chardonnay, malbec, merlot,
  pinotgris, pinotnoir, riesling, zinfandel
)
# Give the four non-indicator columns readable names in one assignment.
names(fullData)[1:4] <- c("variety", "score", "price", "province")
write.csv(fullData, file = "fullData.csv", row.names = FALSE)
# TRAIN/TEST --------------------------------------------------------------
# Random split: roughly 57% of rows (n / 1.75) for training, rest for test.
# `test` is a negative index vector, so fullData[test, ] drops the train rows.
n_obs <- length(fullData[, 3])
train <- sample(n_obs, n_obs / 1.75)
test <- (-train)
X_train <- fullData[train, -1]
y_train <- fullData[train, 1]
X_test <- fullData[test, -1]
y_test <- fullData[test, 1]
# RANDOM FOREST -----------------------------------------------------------
library(randomForest)
# tuneModel = tuneRF(X_train,y_train,mtryStart=20,ntreeTry=250,stepFactor=2,improve=0.01)
# Uniform voting cutoff: each of the eight varieties needs 1/8 of the votes.
wineCutoff <- rep(0.125, 8)
rfModel <- randomForest(
  x = X_train, y = y_train, ntree = 20, mtry = 20, importance = TRUE,
  type = "classification", cutoff = wineCutoff, do.trace = TRUE
)
rfModel
importance(rfModel)
plot(rfModel, main = "Random Forest Error Plot")
# Hold-out accuracy and confusion table.
rfPred <- predict(rfModel, newdata = X_test)
mean(y_test == rfPred)
table(y_test, rfPred)
importance(rfModel)
# H2O RANDOM FOREST --------------------------------------------------------
# Re-fit a (much larger) random forest with the h2o framework, reading the
# feature matrix back from the CSV written above.
library(h2o)
h2o.init(nthreads = -1)
fullData_rf <- h2o.importFile(path = "fullData.csv")
fullData_rf.split <- h2o.splitFrame(data = fullData_rf, ratios = 0.75)
nnTrain <- fullData_rf.split[[1]]
nnTest <- fullData_rf.split[[2]]
h2oRFModel <- h2o.randomForest(y = 'variety', training_frame = nnTrain,
                               validation_frame = nnTest, ntrees = 2000, max_depth = 25)
# BUG FIX: the original called h2o.pred(), which does not exist in the h2o
# package; the prediction function is h2o.predict().
h2oRFPred <- h2o.predict(h2oRFModel, newdata = nnTest)
RFPreds <- as.data.frame(h2oRFPred)[, 1]
# Hold-out misclassification rate.
mean(RFPreds != as.data.frame(nnTest)[, 1])
# NEURAL NETWORK ----------------------------------------------------------
# h2o deep-learning grid searches over hidden-layer architectures, tracking
# the lowest hold-out misclassification rate seen so far in `minError`.
library(h2o)
h2o.init(nthreads = -1)
fullData_nn <- h2o.importFile(path = "fullData.csv")
fullData_nn.split <- h2o.splitFrame(data = fullData_nn, ratios = 0.75) # Original ratios 0.75
nnTrain <- fullData_nn.split[[1]]
nnTest <- fullData_nn.split[[2]]
minError <- 0.4
# Two-hidden-layer grid search (i units in layer 1, j units in layer 2).
# BUG FIX: the original hard-coded hidden=c(12,12), so every one of the 100
# iterations trained the identical architecture while i and j were ignored.
for (i in 21:30) # 15,13||12,10* -- .24646782 13,14 --.2185
{
  for (j in 21:30)
  {
    dlModel <- h2o.deeplearning(y = 'variety', training_frame = nnTrain,
                                activation = 'Maxout',
                                epochs = 25, train_samples_per_iteration = -2,
                                loss = 'CrossEntropy',
                                hidden = c(i, j))
    dlPred <- h2o.predict(dlModel, newdata = nnTest)
    dlPred <- as.data.frame(dlPred)[, 1]
    testError <- mean(dlPred != as.data.frame(nnTest)[, 1])
    if (testError < minError)
    {
      print(c("Min Error with layers ", j, i, testError))
      minError <- testError
    }
  }
}
# Single-hidden-layer search over sizes 10..30, recording the error curve.
# BUG FIX: errorVec was only created in a commented-out line and its per-k
# assignment was also commented out, so the plot below crashed on an
# undefined object; initialize and populate it for real.
errorVec <- rep(0, 33)
for (k in seq(from = 10, to = 30, by = 1))
{
  dlModel <- h2o.deeplearning(y = 'variety', training_frame = nnTrain, activation = 'Maxout',
                              epochs = 20, train_samples_per_iteration = -2, loss = 'CrossEntropy',
                              hidden = c(k))
  dlPred <- h2o.predict(dlModel, newdata = nnTest)
  dlPred <- as.data.frame(dlPred)[, 1]
  errorVec[k] <- mean(dlPred != as.data.frame(nnTest)[, 1])
  print(errorVec[k])
  if (errorVec[k] < minError)
  {
    # BUG FIX: the original printed stale j and i left over from the
    # previous (two-layer) loop; report the actual layer size k.
    print(c("Min Error with single layer of size ", k, errorVec[k]))
    minError <- errorVec[k]
  }
}
# Drop the never-filled slots (k < 10 stay 0) before plotting the curve.
errorVec[errorVec == 0] <- NA
errorVec <- errorVec[complete.cases(errorVec)]
plot(errorVec, xlim = c(1, 33), ylab = "Error", type = "l")
# Final model using the best architecture found in earlier runs (12, 10).
dlModel <- h2o.deeplearning(y = 'variety', training_frame = nnTrain, activation = 'Maxout',
                            epochs = 20, train_samples_per_iteration = -2, loss = 'CrossEntropy',
                            hidden = c(12, 10))
# BUG FIX: the original predicted on a single row (nnTest[5,-1]) but the
# error computation below compares against the entire test column; predict
# on the full test frame so lengths agree.
dlPred <- h2o.predict(dlModel, newdata = nnTest)
dlPred <- as.data.frame(dlPred)[, 1]
# table(dlPred,as.data.frame(nnTest)[,1])
mean(dlPred != as.data.frame(nnTest)[, 1])
# Binary Classification ---------------------------------------------------
# PINOT NOIR
# One-vs-rest: relabel variety as 1 for Pinot Noir and 0 otherwise, then
# fit an adaboost-loss gradient-boosted model on a ~60/40 split.
datasetPinot <- fullData
datasetPinot$variety <- ifelse(datasetPinot$variety == "Pinot Noir", 1, 0)
nPinot <- length(datasetPinot$variety)
train <- sample(nPinot, nPinot / 1.67)
test <- (-train)
library(gbm)
gbmModel <- gbm(variety ~ ., data = datasetPinot[train, ], distribution = "adaboost",
                shrinkage = 0.01, n.cores = 4, n.trees = 3000, interaction.depth = 1)
gbmPred <- predict(gbmModel, newdata = datasetPinot[test, ], n.trees = 3000, type = "response")
# Threshold the predicted probability at 0.3 and tabulate the confusion matrix.
gbmPred <- ifelse(gbmPred > 0.3, 1, 0)
table(gbmPred, datasetPinot$variety[test])
# MALBEC
# Same one-vs-rest boosted classifier as above, targeting Malbec and using
# interaction.depth = 2 instead of stumps.
datasetMalbec <- fullData
datasetMalbec$variety <- ifelse(datasetMalbec$variety == "Malbec", 1, 0)
nMalbec <- length(datasetMalbec$variety)
train <- sample(nMalbec, nMalbec / 1.67)
test <- (-train)
library(gbm)
gbmModel <- gbm(variety ~ ., data = datasetMalbec[train, ], distribution = "adaboost",
                shrinkage = 0.01, n.cores = 4, n.trees = 3000, interaction.depth = 2)
gbmPred <- predict(gbmModel, newdata = datasetMalbec[test, ], n.trees = 3000, type = "response")
# Threshold the predicted probability at 0.3 and tabulate the confusion matrix.
gbmPred <- ifelse(gbmPred > 0.3, 1, 0)
table(gbmPred, datasetMalbec$variety[test])
# File: wineReviews.R (repo NicholasNikolov/2018-WineClassifier, no license, 32,453 bytes)
# DATA READIN -------------------------------------------------------------
# Load the wine-review CSV, drop incomplete rows and unused columns, then
# keep only the eight target varieties while excluding blends and rows with
# mis-encoded variety names.
dataset <- read.csv("winemag-data-130k-v2.csv")
dataset[dataset == ""] <- NA
dataset <- na.omit(dataset)
dataset <- dataset[, -c(1, 6, 7, 8)]
choiceWines <- c("Pinot Gris", "Pinot Noir", "Cabernet Sauvignon", "Chardonnay",
                 "Malbec", "Zinfandel", "Riesling", "Merlot")
dataset <- dataset[grepl(paste(choiceWines, collapse = "|"), dataset[, 5]), ]
# Exclude Cabernet Franc and varieties with mojibake characters.
dataset <- dataset[!grepl("Franc", dataset[, 5]), ]
dataset <- dataset[!grepl("Ã", dataset[, 5]), ]
# Blends and lookalike varieties that the choiceWines regex also matched.
omitWines <- c("Cabernet Merlot", "Viognier", "Merlot-Cabernet", "Blanc", "Semillon",
               "Syrah", "Johannisberg", "Sangiovese", "Shiraz", "Barbera",
               "Tempranillo", "Merlot-Malbec", "Tannat", "Chardonnay-Riesling",
               "White", "Pinot-Chardonnay", "Malbec-Cabernet", "Malbec-Merlot",
               "Sauvignon-Malbec", "Riesling-Chardonnay", "Sauvignon-Merlot")
dataset <- dataset[!grepl(paste(omitWines, collapse = "|"), dataset[, 5]), ]
dataset$province <- droplevels(dataset$province)
# VARIABLE CREATION -------------------------------------------------------
# One 0/1 indicator per tasting-note keyword: the flag is 1 when the keyword
# appears (case-insensitively) in the review text, dataset[, 1].
# Vectorized grepl() replaces the original per-row for-loops (same result,
# one pass per keyword instead of one grepl call per row).
flag_keyword <- function(keyword) {
  as.integer(grepl(keyword, dataset[, 1], ignore.case = TRUE))
}
dry <- flag_keyword("dry")
tannic <- flag_keyword("tannic")
soft <- flag_keyword("soft")
cherry <- flag_keyword("cherry")
stiff <- flag_keyword("stiff")
thick <- flag_keyword("thick")
creamy <- flag_keyword("creamy")
tart <- flag_keyword("tart")
bitter <- flag_keyword("bitter")
berry <- flag_keyword("berry")
red <- flag_keyword("red")
white <- flag_keyword("white")
peach <- flag_keyword("peach")
deep <- flag_keyword("deep")
oak <- flag_keyword("oak")
smoky <- flag_keyword("smoky")
rich <- flag_keyword("rich")
vanilla <- flag_keyword("vanilla")
medium <- flag_keyword("medium")
pepper <- flag_keyword("pepper")
plum <- flag_keyword("plum")
crisp <- flag_keyword("crisp")
sweet <- flag_keyword("sweet")
dark <- flag_keyword("dark")
# Hyphenated vs spaced body descriptors are tracked separately.
fullBodied <- flag_keyword("full-bodied")
fullBodied_s <- flag_keyword("full bodied")
mediumBodied <- flag_keyword("medium-bodied")
mediumBodied_s <- flag_keyword("medium bodied")
refreshing <- flag_keyword("refreshing")
spice <- flag_keyword("spice")
savory <- flag_keyword("savory")
round <- flag_keyword("round")
orange <- flag_keyword("orange")
smooth <- flag_keyword("smooth")
bright <- flag_keyword("bright")
spicy <- flag_keyword("spicy")
stone <- flag_keyword("stone")
licorice <- flag_keyword("licorice")
lively <- flag_keyword("lively")
fruity <- flag_keyword("fruity")
mineral <- flag_keyword("mineral")
cherries <- flag_keyword("cherries")
chalky <- flag_keyword("chalky")
dense <- flag_keyword("dense")
raspberry <- flag_keyword("raspberry")
graphite <- flag_keyword("graphite")
toast <- flag_keyword("toast")
tobacco <- flag_keyword("tobacco")
structured <- flag_keyword("structured")
black <- flag_keyword("black")
balanced <- flag_keyword("balanced")
balsamic <- flag_keyword("balsamic")
cool <- flag_keyword("cool")
jam <- flag_keyword("jam")
# Original computed the cinnamon flag three identical times; once suffices.
cinnamon <- flag_keyword("cinnamon")
tang <- flag_keyword("tang")
coffee <- flag_keyword("coffee")
# TESTED WITH 33% error
blackberry <- flag_keyword("blackberry")
roast <- flag_keyword("roast")
firm <- flag_keyword("firm")
cedar <- flag_keyword("cedar")
beef <- flag_keyword("beef")
complex <- flag_keyword("complex")
cranberry <- flag_keyword("cranberry")
caramel <- flag_keyword("caramel")
mocha <- flag_keyword("mocha")
# BUG FIX: the original "rose" loop assigned into mocha[i], leaving `rose`
# all zeros and polluting `mocha` with rose matches.
rose <- flag_keyword("rose")
intense <- flag_keyword("intense")
concentrated <- flag_keyword("concentrated")
# BUG FIX: the original referenced `taset` (typo for `dataset`) and errored.
earthy <- flag_keyword("earthy")
strong <- flag_keyword("strong")
sour <- flag_keyword("sour")
meat <- flag_keyword("meat")
cassis <- flag_keyword("cassis")
sharp <- flag_keyword("sharp")
vintage <- flag_keyword("vintage")
# Multi-word phrase flags (abbreviated names kept for downstream cbind).
bcf <- flag_keyword("black cherry")
nfo <- flag_keyword("french oak")
fbw <- flag_keyword("full bodied")
mbw <- flag_keyword("medium bodied")
tff <- flag_keyword("fruit flavors")
sso <- flag_keyword("sweet smoky oak")
sff <- flag_keyword("stone fruit flavors")
fgt <- flag_keyword("fine grained tannins")
dff <- flag_keyword("dark fruit flavors")
bff <- flag_keyword("black fruit flavors")
lic <- flag_keyword("light in color")
hia <- flag_keyword("high in alcohol")
rrv <- flag_keyword("russian river valley")
bt <- flag_keyword("buttered toast")
bp <- flag_keyword("black pepper")
rc <- flag_keyword("red cherry")
ca <- flag_keyword("crisp acidity")
ft <- flag_keyword("firm tannins")
lbodied <- flag_keyword("light bodied")
bplum <- flag_keyword("black plum")
bonedry <- flag_keyword("dry")
blkcherries <- flag_keyword("black cherries")
# CHERRY FLAVORS
chflavors = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("cherry flavors",dataset[i,1],ignore.case=TRUE)==TRUE)
{
bt[i]=1
}
}
# DARK CHOCOLATE
drkchocolate = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("dark chocolate",dataset[i,1],ignore.case=TRUE)==TRUE)
{
drkchocolate[i]=1
}
}
# BLACK CURRANT
blkcurrant = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("black currant",dataset[i,1],ignore.case=TRUE)==TRUE)
{
blkcurrant[i]=1
}
}
# OAK FLAVORS
oakflavors = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("oak flavors",dataset[i,1],ignore.case=TRUE)==TRUE)
{
oakflavors[i]=1
}
}
# SPICE FLAVORS
spflavors = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("spice flavors",dataset[i,1],ignore.case=TRUE)==TRUE)
{
spflavors[i]=1
}
}
# GREEN APPLE
greenapple = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("green apple",dataset[i,1],ignore.case=TRUE)==TRUE)
{
greenapple[i]=1
}
}
# TROPICAL FRUIT
trpfruit = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("tropical fruit",dataset[i,1],ignore.case=TRUE)==TRUE)
{
trpfruit[i]=1
}
}
# RED CURRANT
redcurrant = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("red currant",dataset[i,1],ignore.case=TRUE)==TRUE)
{
redcurrant[i]=1
}
}
# PETIT VERDOT
petitverdot = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("petit verdot",dataset[i,1],ignore.case=TRUE)==TRUE)
{
petitverdot[i]=1
}
}
# HIGH TONED
hightoned = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("high toned",dataset[i,1],ignore.case=TRUE)==TRUE)
{
hightoned[i]=1
}
}
# VARIETAL WINE
varietalwine = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("varietal wine",dataset[i,1],ignore.case=TRUE)==TRUE)
{
varietalwine[i]=1
}
}
# WHITE PEPPER
whitepepper = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("white pepper",dataset[i,1],ignore.case=TRUE)==TRUE)
{
whitepepper[i]=1
}
}
# BRISK ACITITY
briskacidity = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("brisk acidity",dataset[i,1],ignore.case=TRUE)==TRUE)
{
briskacidity[i]=1
}
}
# BRIGHT ACIDITY
brightacidity = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("bright acidity",dataset[i,1],ignore.case=TRUE)==TRUE)
{
brightacidity[i]=1
}
}
# BLACK LICORICE
blacklicorice = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("black licorice",dataset[i,1],ignore.case=TRUE)==TRUE)
{
blacklicorice[i]=1
}
}
# BARREL SPICE
barrelspice = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("barrel spice",dataset[i,1],ignore.case=TRUE)==TRUE)
{
barrelspice[i]=1
}
}
# GRAINED TANNINS
grainedtannins = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("grained tannins",dataset[i,1],ignore.case=TRUE)==TRUE)
{
grainedtannins[i]=1
}
}
# SMOOTH TANNINS
smoothtannins = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("smooth tannins",dataset[i,1],ignore.case=TRUE)==TRUE)
{
smoothtannins[i]=1
}
}
# 100% VARIETAL
varietal = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("100% varietal",dataset[i,1],ignore.case=TRUE)==TRUE)
{
varietal[i]=1
}
}
# SWEET
sweet = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("sweet",dataset[i,1],ignore.case=TRUE)==TRUE)
{
sweet[i]=1
}
}
# DARK RED
darkred = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("dark red",dataset[i,1],ignore.case=TRUE)==TRUE)
{
darkred[i]=1
}
}
# BLACK TEA
blacktea = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("black tea",dataset[i,1],ignore.case=TRUE)==TRUE)
{
blacktea[i]=1
}
}
# FOREST FLOOR
forestfloor = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("forest floor",dataset[i,1],ignore.case=TRUE)==TRUE)
{
forestfloor[i]=1
}
}
# ROSE PETALS
rosepetals = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("rose petals",dataset[i,1],ignore.case=TRUE)==TRUE)
{
rosepetals[i]=1
}
}
# SILKY TEXTURE
silkytexture = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("silky texture",dataset[i,1],ignore.case=TRUE)==TRUE)
{
silkytexture[i]=1
}
}
# SOUR CHERRY
sourcherry = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("sour cherry",dataset[i,1],ignore.case=TRUE)==TRUE)
{
sourcherry[i]=1
}
}
# CABERNET SAUVIGNON
cabernet = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("cabernet sauvignon",dataset[i,1],ignore.case=TRUE)==TRUE)
{
cabernet[i]=1
}
}
# Chardonnay
chardonnay = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("chardonnay",dataset[i,1],ignore.case=TRUE)==TRUE)
{
chardonnay[i]=1
}
}
# MALBEC
malbec = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("malbec",dataset[i,1],ignore.case=TRUE)==TRUE)
{
malbec[i]=1
}
}
# MERLOT
merlot = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("merlot",dataset[i,1],ignore.case=TRUE)==TRUE)
{
merlot[i]=1
}
}
# PINOT GRIS
pinotgris = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("pinot gris",dataset[i,1],ignore.case=TRUE)==TRUE)
{
pinotgris[i]=1
}
}
# PINOT NOIR
pinotnoir = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("pinot noir",dataset[i,1],ignore.case=TRUE)==TRUE)
{
pinotnoir[i]=1
}
}
# RIESLING
riesling = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("riesling",dataset[i,1],ignore.case=TRUE)==TRUE)
{
riesling[i]=1
}
}
# ZINFANDEL
zinfandel = rep(0,length(dataset[,4]))
for(i in 1:length(dataset[,3]))
{
if(grepl("zinfandel",dataset[i,1],ignore.case=TRUE)==TRUE)
{
zinfandel[i]=1
}
}
# Add Almond, Apple, Apricot,
# berries, blueberry, candied,
# chocolate,
# clean, cocoa,
# cranberry, currant,
# easy, elegant, espresso, flower, fragrant,
# grapefruit, green,
# grilled, herb, honey,leather, lemon,
#
# racy(21),
#
# silky(25),
#
# subtle(23), supple(24),
# thyme(21),
# tropical(30), velvety(21), vibrant(36),
#
# UPDATED DATASET ---------------------------------------------------------
# Assemble the modelling data frame: column 1 is the response (wine variety,
# coerced to a factor), columns 2-4 are score/price/province, and the
# remaining columns are the 0/1 keyword-indicator vectors built above from
# the review text. NOTE(review): the first four columns are positional
# (dataset[,5], dataset[,2:4]) and must stay aligned with the names
# assigned below.
fullData = cbind.data.frame(factor(dataset[,5]),dataset[,2],dataset[,3],dataset[,4],
dry,tannic,soft,cherry,stiff,thick,
creamy,tart,bitter,berry,red,white,peach,deep,oak,
smoky,rich,vanilla,medium,pepper,plum,crisp,sweet,dark,
fullBodied,fullBodied_s,mediumBodied,mediumBodied_s,refreshing,spice,
savory,round,orange,smooth,bright,spicy,stone,licorice,lively,fruity,
mineral,cherries,chalky,dense,raspberry,graphite,
toast,tobacco,structured,black,balanced,balsamic,cool,cinnamon,
tang,jam,coffee,blackberry,roast,firm,cedar,beef,complex,cranberry,
caramel,mocha,rose,intense,concentrated,earthy,strong,sour,meat,
cassis,sharp,vintage,bcf,nfo,fbw,mbw,tff,sso,sff,fgt,dff,
bff,lic,hia,rrv,bt,bp,rc,ca,ft,lbodied,bplum,bonedry,blkcherries,drkchocolate,
blkcurrant,oakflavors,spflavors,greenapple,trpfruit,redcurrant,petitverdot,
hightoned,varietalwine,whitepepper,briskacidity,brightacidity,blacklicorice,
barrelspice,grainedtannins,smoothtannins,cabernet,chardonnay,malbec,merlot,
pinotgris,pinotnoir,riesling,zinfandel)
# Readable names for the four non-indicator columns; the indicator columns
# keep the variable names picked up by cbind.data.frame().
names(fullData)[1] <- "variety"
names(fullData)[2] <- "score"
names(fullData)[3] <- "price"
names(fullData)[4] <- "province"
# Persist the assembled dataset so the h2o sections below can re-import it.
write.csv(fullData,file="fullData.csv",row.names=FALSE)
# TRAIN/TEST --------------------------------------------------------------
# ~57% of rows (1/1.75) for training; the remainder for testing via
# negative indexing. floor() makes the fractional sample size explicit.
n_obs <- nrow(fullData)
train <- sample(n_obs, floor(n_obs / 1.75))
test <- (-train)
X_train <- fullData[train, -1]
y_train <- fullData[train, 1]
X_test <- fullData[test, -1]
y_test <- fullData[test, 1]
# RANDOM FOREST -----------------------------------------------------------
library(randomForest)
# tuneModel = tuneRF(X_train,y_train,mtryStart=20,ntreeTry=250,stepFactor=2,improve=0.01)
# Equal per-class voting cutoff for the 8 wine varieties (1/8 each).
wineCutoff <- rep(0.125, 8)
rfModel <- randomForest(
  x = X_train, y = y_train, ntree = 20, mtry = 20, importance = TRUE,
  type = "classification", cutoff = wineCutoff, do.trace = TRUE
)
rfModel
importance(rfModel)  # variable importance (original printed this twice; once suffices)
plot(rfModel, main = "Random Forest Error Plot")
rfPred <- predict(rfModel, newdata = X_test)
mean(y_test == rfPred)  # test-set accuracy
table(y_test, rfPred)   # confusion matrix
# Using h2o Framework ------------------------------------------------------
# Distributed random forest on the same data, re-imported from fullData.csv.
library(h2o)
h2o.init(nthreads = -1)
fullData_rf <- h2o.importFile(path = "fullData.csv")
fullData_rf.split <- h2o.splitFrame(data = fullData_rf, ratios = 0.75)
nnTrain <- fullData_rf.split[[1]]
nnTest <- fullData_rf.split[[2]]
h2oRFModel <- h2o.randomForest(y = 'variety', training_frame = nnTrain,
                               validation_frame = nnTest,
                               ntrees = 2000, max_depth = 25)
# Bug fix: the h2o scoring function is h2o.predict(); h2o.pred() does not exist.
h2oRFPred <- h2o.predict(h2oRFModel, newdata = nnTest)
RFPreds <- as.data.frame(h2oRFPred)[, 1]
mean(RFPreds != as.data.frame(nnTest)[, 1])  # misclassification rate
# NEURAL NETWORK ----------------------------------------------------------
library(h2o)
h2o.init(nthreads = -1)
fullData_nn <- h2o.importFile(path = "fullData.csv")
fullData_nn.split <- h2o.splitFrame(data = fullData_nn, ratios = 0.75) # Original ratios 0.75
nnTrain <- fullData_nn.split[[1]]
nnTest <- fullData_nn.split[[2]]
minError <- .4
# Grid search over two-hidden-layer sizes.
# Bug fix: the original always trained hidden=c(12,12), ignoring the loop
# variables; hidden=c(j,i) matches the "layers j,i" order printed below.
for (i in 21:30) # earlier experiments: 15,13 || 12,10* -- .24646782; 13,14 -- .2185
{
  for (j in 21:30)
  {
    dlModel <- h2o.deeplearning(y = 'variety', training_frame = nnTrain,
                                activation = 'Maxout',
                                epochs = 25, train_samples_per_iteration = -2,
                                loss = 'CrossEntropy',
                                hidden = c(j, i))
    dlPred <- h2o.predict(dlModel, newdata = nnTest)
    dlPred <- as.data.frame(dlPred)[, 1]
    err <- mean(dlPred != as.data.frame(nnTest)[, 1])
    if (err < minError)
    {
      print(c("Min Error with layers ", j, i, err))
      minError <- err
    }
  }
}
# Single-hidden-layer search.
# Bug fix: errorVec was used by the plotting code below, but its
# initialisation and per-iteration updates were commented out, so the
# script errored at errorVec[errorVec==0].
errorVec <- rep(NA_real_, 30)
for (k in seq(from = 10, to = 30, by = 1))
{
  dlModel <- h2o.deeplearning(y = 'variety', training_frame = nnTrain, activation = 'Maxout',
                              epochs = 20, train_samples_per_iteration = -2, loss = 'CrossEntropy',
                              hidden = c(k))
  dlPred <- h2o.predict(dlModel, newdata = nnTest)
  dlPred <- as.data.frame(dlPred)[, 1]
  err <- mean(dlPred != as.data.frame(nnTest)[, 1])
  errorVec[k] <- err
  print(err)
  if (err < minError)
  {
    # Bug fix: the original printed the stale j,i from the previous loop.
    print(c("Min Error with single hidden layer of size ", k, err))
    minError <- err
  }
}
errorVec <- errorVec[!is.na(errorVec)]  # keep only the layer sizes actually tried
plot(errorVec, xlim = c(1, 33), ylab = "Error", type = "l")
# Final model with the best configuration found in earlier experiments (12, 10).
dlModel <- h2o.deeplearning(y = 'variety', training_frame = nnTrain, activation = 'Maxout',
                            epochs = 20, train_samples_per_iteration = -2, loss = 'CrossEntropy',
                            hidden = c(12, 10))
# Bug fix: predict on the whole test frame. The original predicted on a
# single debugging row (nnTest[5,-1]) yet compared against the full test set.
dlPred <- h2o.predict(dlModel, newdata = nnTest)
dlPred <- as.data.frame(dlPred)[, 1]
# table(dlPred,as.data.frame(nnTest)[,1])
mean(dlPred != as.data.frame(nnTest)[, 1])  # final misclassification rate
# Binary Classification ---------------------------------------------------
# One-vs-rest boosted models: recode `variety` to a 0/1 target for a single
# grape, fit an adaboost GBM, and threshold predicted probabilities at 0.3.
# PINOT NOIR
datasetPinot <- fullData
datasetPinot$variety <- as.numeric(datasetPinot$variety == "Pinot Noir")
train <- sample(nrow(datasetPinot), nrow(datasetPinot) / 1.67)
test <- (-train)
library(gbm)
gbmModel <- gbm(variety ~ ., data = datasetPinot[train, ],
                distribution = "adaboost", shrinkage = 0.01,
                n.cores = 4, n.trees = 3000, interaction.depth = 1)
gbmPred <- predict(gbmModel, newdata = datasetPinot[test, ],
                   n.trees = 3000, type = "response")
gbmPred <- as.numeric(gbmPred > 0.3)   # 0.3 probability threshold
table(gbmPred, datasetPinot$variety[test])
# MALBEC
datasetMalbec <- fullData
datasetMalbec$variety <- as.numeric(datasetMalbec$variety == "Malbec")
train <- sample(nrow(datasetMalbec), nrow(datasetMalbec) / 1.67)
test <- (-train)
library(gbm)
gbmModel <- gbm(variety ~ ., data = datasetMalbec[train, ],
                distribution = "adaboost", shrinkage = 0.01,
                n.cores = 4, n.trees = 3000, interaction.depth = 2)
gbmPred <- predict(gbmModel, newdata = datasetMalbec[test, ],
                   n.trees = 3000, type = "response")
gbmPred <- as.numeric(gbmPred > 0.3)   # 0.3 probability threshold
table(gbmPred, datasetMalbec$variety[test])
|
##Getting data, project R-code
## Builds a tidy dataset from the UCI HAR data: merges train/test sets,
## keeps mean/std measurements, labels activities, and writes per-subject,
## per-activity averages to tidydata_set.txt.
library(dplyr)   # bug fix: filter()/select()/mutate() below come from dplyr
## read features, remove those symbols
folder <- paste(getwd(), "/UCI HAR Dataset", sep = "")
Variables <- read.table(paste(folder, "/features.txt", sep = ""))
Var <- Variables$V2
Var <- gsub("-", "", Var)
Var <- gsub("[()]", "", Var)   # drop both parentheses in one pass
## read training set data, rename data
trainx <- read.table(paste(folder, "/train/X_train.txt", sep = ""))
names(trainx) <- Var
trainy <- read.table(paste(folder, "/train/y_train.txt", sep = ""))
names(trainy) <- "activity"
trainsub <- read.table(paste(folder, "/train/subject_train.txt", sep = ""))
names(trainsub) <- "subject"
## combine data
train <- cbind(trainsub, trainy, trainx)
## test set
testx <- read.table(paste(folder, "/test/X_test.txt", sep = ""))
names(testx) <- Var
testy <- read.table(paste(folder, "/test/y_test.txt", sep = ""))
names(testy) <- "activity"
testsub <- read.table(paste(folder, "/test/subject_test.txt", sep = ""))
names(testsub) <- "subject"
test <- cbind(testsub, testy, testx)
##2947 563
all <- rbind(test, train) ##10299 563
col_names <- names(all)   # renamed from `names` to avoid masking base::names()
all1 <- all[, grep("mean", col_names)]
meandata <- select(all1, -contains("Freq"))
stddata <- all[, grep("std", col_names)] ##10299, 33
new_data <- cbind(all[, c(1, 2)], meandata, stddata) ##10299, 68
##Get activity_labels
actlab <- read.table(paste(folder, "/activity_labels.txt", sep = ""))
##Replace the number with activity_labels
new2 <- NULL
for (i in seq_len(nrow(actlab))) {
  act <- filter(new_data, activity == actlab$V1[i])
  act <- mutate(act, activity = actlab$V2[i])
  new2 <- rbind(new2, act)
}
##new2 is the activity named data
##Step 4, calculate mean of each variables by each subject and each activity.
##Get a new data frame
new_set <- data.frame(NULL)
## loop through all 30 subjects.
for (i in 1:30) {
  ## Get data set by each subject
  sub1 <- filter(new2, subject == i)
  ## calculate mean data by each activity
  md <- data.frame(NULL)
  ## 6 activities
  for (j in 1:6) {
    sub1act <- filter(sub1, activity == actlab$V2[j])
    md <- rbind(md, apply(sub1act[, c(3:68)], 2, mean))
  }
  ## combine the data of each subject and add names
  sub1new <- cbind(i, actlab$V2, md)
  names(sub1new) <- names(new2)
  ## add data to new tidy data set
  new_set <- rbind(new_set, sub1new)
}
## write.table(): bug fix, spell out row.names (was partial-matched row.name=)
write.table(new_set, "tidydata_set.txt", row.names = FALSE)
## dim(new_set) is 180, 68
| /run_analysis.r | no_license | jhzhao/GettingData | R | false | false | 2,666 | r | ##Getting data, project R-code
## Builds a tidy dataset from the UCI HAR data: merges train/test sets,
## keeps mean/std measurements, labels activities, and writes per-subject,
## per-activity averages to tidydata_set.txt.
library(dplyr)   # bug fix: filter()/select()/mutate() below come from dplyr
## read features, remove those symbols
folder <- paste(getwd(), "/UCI HAR Dataset", sep = "")
Variables <- read.table(paste(folder, "/features.txt", sep = ""))
Var <- Variables$V2
Var <- gsub("-", "", Var)
Var <- gsub("[()]", "", Var)   # drop both parentheses in one pass
## read training set data, rename data
trainx <- read.table(paste(folder, "/train/X_train.txt", sep = ""))
names(trainx) <- Var
trainy <- read.table(paste(folder, "/train/y_train.txt", sep = ""))
names(trainy) <- "activity"
trainsub <- read.table(paste(folder, "/train/subject_train.txt", sep = ""))
names(trainsub) <- "subject"
## combine data
train <- cbind(trainsub, trainy, trainx)
## test set
testx <- read.table(paste(folder, "/test/X_test.txt", sep = ""))
names(testx) <- Var
testy <- read.table(paste(folder, "/test/y_test.txt", sep = ""))
names(testy) <- "activity"
testsub <- read.table(paste(folder, "/test/subject_test.txt", sep = ""))
names(testsub) <- "subject"
test <- cbind(testsub, testy, testx)
##2947 563
all <- rbind(test, train) ##10299 563
col_names <- names(all)   # renamed from `names` to avoid masking base::names()
all1 <- all[, grep("mean", col_names)]
meandata <- select(all1, -contains("Freq"))
stddata <- all[, grep("std", col_names)] ##10299, 33
new_data <- cbind(all[, c(1, 2)], meandata, stddata) ##10299, 68
##Get activity_labels
actlab <- read.table(paste(folder, "/activity_labels.txt", sep = ""))
##Replace the number with activity_labels
new2 <- NULL
for (i in seq_len(nrow(actlab))) {
  act <- filter(new_data, activity == actlab$V1[i])
  act <- mutate(act, activity = actlab$V2[i])
  new2 <- rbind(new2, act)
}
##new2 is the activity named data
##Step 4, calculate mean of each variables by each subject and each activity.
##Get a new data frame
new_set <- data.frame(NULL)
## loop through all 30 subjects.
for (i in 1:30) {
  ## Get data set by each subject
  sub1 <- filter(new2, subject == i)
  ## calculate mean data by each activity
  md <- data.frame(NULL)
  ## 6 activities
  for (j in 1:6) {
    sub1act <- filter(sub1, activity == actlab$V2[j])
    md <- rbind(md, apply(sub1act[, c(3:68)], 2, mean))
  }
  ## combine the data of each subject and add names
  sub1new <- cbind(i, actlab$V2, md)
  names(sub1new) <- names(new2)
  ## add data to new tidy data set
  new_set <- rbind(new_set, sub1new)
}
## write.table(): bug fix, spell out row.names (was partial-matched row.name=)
write.table(new_set, "tidydata_set.txt", row.names = FALSE)
## dim(new_set) is 180, 68
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slice.R
\name{step_slice}
\alias{step_slice}
\title{Filter rows by position using dplyr}
\usage{
step_slice(
recipe,
...,
role = NA,
trained = FALSE,
inputs = NULL,
skip = TRUE,
id = rand_id("slice")
)
}
\arguments{
\item{recipe}{A recipe object. The step will be added to the
sequence of operations for this recipe.}
\item{...}{Integer row values. See
\code{\link[dplyr:slice]{dplyr::slice()}} for more details.}
\item{role}{Not used by this step since no new variables are
created.}
\item{trained}{A logical to indicate if the quantities for
preprocessing have been estimated.}
\item{inputs}{Quosure of values given by \code{...}.}
\item{skip}{A logical. Should the step be skipped when the
recipe is baked by \code{\link[=bake]{bake()}}? While all operations are baked
when \code{\link[=prep]{prep()}} is run, some operations may not be able to be
conducted on new data (e.g. processing the outcome variable(s)).
Care should be taken when using \code{skip = FALSE}.}
\item{id}{A character string that is unique to this step to identify it.}
}
\value{
An updated version of \code{recipe} with the new step added to the
sequence of any existing operations.
}
\description{
\code{step_slice()} creates a \emph{specification} of a recipe step that will filter
rows using \code{\link[dplyr:slice]{dplyr::slice()}}.
}
\details{
When an object in the user's global environment is
referenced in the expression defining the new variable(s),
it is a good idea to use quasiquotation (e.g. \verb{!!})
to embed the value of the object in the expression (to
be portable between sessions). See the examples.
}
\section{Row Filtering}{
This step can entirely remove observations (rows of data), which can have
unintended and/or problematic consequences when applying the step to new
data later via \code{\link[=bake]{bake()}}. Consider whether \code{skip = TRUE} or
\code{skip = FALSE} is more appropriate in any given use case. In most instances
that affect the rows of the data being predicted, this step probably should
not be applied at all; instead, execute operations like this outside and
before starting a preprocessing \code{\link[=recipe]{recipe()}}.
}
\section{Tidying}{
When you \code{\link[=tidy.recipe]{tidy()}} this step, a tibble with column
\code{terms} which contains the filtering indices is returned.
}
\section{Case weights}{
The underlying operation does not allow for case weights.
}
\examples{
rec <- recipe(~., data = iris) \%>\%
step_slice(1:3)
prepped <- prep(rec, training = iris \%>\% slice(1:75))
tidy(prepped, number = 1)
library(dplyr)
dplyr_train <-
iris \%>\%
as_tibble() \%>\%
slice(1:75) \%>\%
slice(1:3)
rec_train <- bake(prepped, new_data = NULL)
all.equal(dplyr_train, rec_train)
dplyr_test <-
iris \%>\%
as_tibble() \%>\%
slice(76:150) \%>\%
slice(1:3)
rec_test <- bake(prepped, iris \%>\% slice(76:150))
all.equal(dplyr_test, rec_test)
# Embedding the integer expression (or vector) into the
# recipe:
keep_rows <- 1:6
qq_rec <-
recipe(~., data = iris) \%>\%
# Embed `keep_rows` in the call using !!
step_slice(!!keep_rows) \%>\%
prep(training = iris)
tidy(qq_rec, number = 1)
}
\seealso{
Other row operation steps:
\code{\link{step_arrange}()},
\code{\link{step_filter}()},
\code{\link{step_impute_roll}()},
\code{\link{step_lag}()},
\code{\link{step_naomit}()},
\code{\link{step_sample}()},
\code{\link{step_shuffle}()}
Other dplyr steps:
\code{\link{step_arrange}()},
\code{\link{step_filter}()},
\code{\link{step_mutate_at}()},
\code{\link{step_mutate}()},
\code{\link{step_rename_at}()},
\code{\link{step_rename}()},
\code{\link{step_sample}()},
\code{\link{step_select}()}
}
\concept{dplyr steps}
\concept{row operation steps}
| /man/step_slice.Rd | permissive | tidymodels/recipes | R | false | true | 3,804 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slice.R
\name{step_slice}
\alias{step_slice}
\title{Filter rows by position using dplyr}
\usage{
step_slice(
recipe,
...,
role = NA,
trained = FALSE,
inputs = NULL,
skip = TRUE,
id = rand_id("slice")
)
}
\arguments{
\item{recipe}{A recipe object. The step will be added to the
sequence of operations for this recipe.}
\item{...}{Integer row values. See
\code{\link[dplyr:slice]{dplyr::slice()}} for more details.}
\item{role}{Not used by this step since no new variables are
created.}
\item{trained}{A logical to indicate if the quantities for
preprocessing have been estimated.}
\item{inputs}{Quosure of values given by \code{...}.}
\item{skip}{A logical. Should the step be skipped when the
recipe is baked by \code{\link[=bake]{bake()}}? While all operations are baked
when \code{\link[=prep]{prep()}} is run, some operations may not be able to be
conducted on new data (e.g. processing the outcome variable(s)).
Care should be taken when using \code{skip = FALSE}.}
\item{id}{A character string that is unique to this step to identify it.}
}
\value{
An updated version of \code{recipe} with the new step added to the
sequence of any existing operations.
}
\description{
\code{step_slice()} creates a \emph{specification} of a recipe step that will filter
rows using \code{\link[dplyr:slice]{dplyr::slice()}}.
}
\details{
When an object in the user's global environment is
referenced in the expression defining the new variable(s),
it is a good idea to use quasiquotation (e.g. \verb{!!})
to embed the value of the object in the expression (to
be portable between sessions). See the examples.
}
\section{Row Filtering}{
This step can entirely remove observations (rows of data), which can have
unintended and/or problematic consequences when applying the step to new
data later via \code{\link[=bake]{bake()}}. Consider whether \code{skip = TRUE} or
\code{skip = FALSE} is more appropriate in any given use case. In most instances
that affect the rows of the data being predicted, this step probably should
not be applied at all; instead, execute operations like this outside and
before starting a preprocessing \code{\link[=recipe]{recipe()}}.
}
\section{Tidying}{
When you \code{\link[=tidy.recipe]{tidy()}} this step, a tibble with column
\code{terms} which contains the filtering indices is returned.
}
\section{Case weights}{
The underlying operation does not allow for case weights.
}
\examples{
rec <- recipe(~., data = iris) \%>\%
step_slice(1:3)
prepped <- prep(rec, training = iris \%>\% slice(1:75))
tidy(prepped, number = 1)
library(dplyr)
dplyr_train <-
iris \%>\%
as_tibble() \%>\%
slice(1:75) \%>\%
slice(1:3)
rec_train <- bake(prepped, new_data = NULL)
all.equal(dplyr_train, rec_train)
dplyr_test <-
iris \%>\%
as_tibble() \%>\%
slice(76:150) \%>\%
slice(1:3)
rec_test <- bake(prepped, iris \%>\% slice(76:150))
all.equal(dplyr_test, rec_test)
# Embedding the integer expression (or vector) into the
# recipe:
keep_rows <- 1:6
qq_rec <-
recipe(~., data = iris) \%>\%
# Embed `keep_rows` in the call using !!
step_slice(!!keep_rows) \%>\%
prep(training = iris)
tidy(qq_rec, number = 1)
}
\seealso{
Other row operation steps:
\code{\link{step_arrange}()},
\code{\link{step_filter}()},
\code{\link{step_impute_roll}()},
\code{\link{step_lag}()},
\code{\link{step_naomit}()},
\code{\link{step_sample}()},
\code{\link{step_shuffle}()}
Other dplyr steps:
\code{\link{step_arrange}()},
\code{\link{step_filter}()},
\code{\link{step_mutate_at}()},
\code{\link{step_mutate}()},
\code{\link{step_rename_at}()},
\code{\link{step_rename}()},
\code{\link{step_sample}()},
\code{\link{step_select}()}
}
\concept{dplyr steps}
\concept{row operation steps}
|
# IMDB 2016 feature-film scraper: pulls the first 100 titles and assembles a
# data frame of rank/title/description/runtime/genre/rating/votes.
library(rvest)   # bug fix: read_html()/html_nodes()/html_text() are rvest functions
url <- 'https://www.imdb.com/search/title/?count=100&release_date=2016,2016&title_type=feature'
webpage <- read_html(url)   # was right-assign: read_html(url) -> webpage
#Using CSS selectors to scrape the rankings section
rank_data_html <- html_nodes(webpage, '.text-primary')
rank_data <- html_text(rank_data_html)
head(rank_data)
#Data preprocessing
rank_data <- as.numeric(rank_data)
head(rank_data)
#Using CSS selectors to scrape the title section
title_data_html <- html_nodes(webpage, '.lister-item-header a')
title_data <- html_text(title_data_html)
head(title_data)
#Using CSS selectors to scrape the description section
description_data_html <- html_nodes(webpage, '.ratings-bar+ .text-muted')
description_data <- html_text(description_data_html)
head(description_data)
#Data-Preprocessing: strip embedded newlines
description_data <- gsub("\n", "", description_data)
head(description_data)
#Using CSS selectors to scrape the Movie runtime section
runtime_data_html <- html_nodes(webpage, '.text-muted .runtime')
runtime_data <- html_text(runtime_data_html)
head(runtime_data)
#Data-Preprocessing: drop the " min" suffix and convert to numeric
runtime_data <- gsub(" min", "", runtime_data)
runtime_data <- as.numeric(runtime_data)
head(runtime_data)
#Using CSS selectors to scrape the Movie genre section
genre_data_html <- html_nodes(webpage, '.genre')
genre_data <- html_text(genre_data_html)
head(genre_data)
#Data-Preprocessing: remove newlines and spaces
genre_data <- gsub("\n", "", genre_data)
genre_data <- gsub(" ", "", genre_data)
#taking only the first genre of each movie
genre_data <- gsub(",.*", "", genre_data)
#Converting each genre from text to factor
genre_data <- as.factor(genre_data)
head(genre_data)
#Using CSS selectors to scrape the IMDB rating section
rating_data_html <- html_nodes(webpage, '.ratings-imdb-rating strong')
rating_data <- html_text(rating_data_html)
head(rating_data)
#Data-Preprocessing:
rating_data <- as.numeric(rating_data)
head(rating_data)
#Using CSS selectors to scrape the votes section
votes_data_html <- html_nodes(webpage, '.sort-num_votes-visible span:nth-child(2)')
votes_data <- html_text(votes_data_html)
head(votes_data)
#Data-Preprocessing: remove thousands separators before numeric conversion
votes_data <- gsub(",", "", votes_data)
votes_data <- as.numeric(votes_data)
head(votes_data)
#Using CSS selectors to scrape the directors section
directors_data_html <- html_nodes(webpage, '.text-muted+ p a:nth-child(1)')
directors_data <- html_text(directors_data_html)
head(directors_data)
#Data-Preprocessing:
directors_data <- as.factor(directors_data)
#Using CSS selectors to scrape the actors section
actors_data_html <- html_nodes(webpage, '.lister-item-content .ghost+ a')
actors_data <- html_text(actors_data_html)
head(actors_data)
#Data-Preprocessing:
actors_data <- as.factor(actors_data)
#Combining all the lists to form a data frame
movies_df <- data.frame(Rank = rank_data, Title = title_data,
                        Description = description_data, Runtime = runtime_data,
                        Genre = genre_data, Rating = rating_data, Votes = votes_data)
#Structure of the data frame
str(movies_df)
#data visualization
library('ggplot2')
qplot(data = movies_df, Runtime, fill = Genre, bins = 30)
ggplot(movies_df, aes(x = Runtime, y = Rating)) +
  geom_point(aes(size = Votes, col = Genre))
| /Web scrapping.R | no_license | prats556/Web_Scrapping_of_IMDB- | R | false | false | 3,233 | r | url <- 'https://www.imdb.com/search/title/?count=100&release_date=2016,2016&title_type=feature'
# IMDB 2016 feature-film scraper (duplicate copy; `url` is defined on the
# preceding line). Assembles rank/title/description/runtime/genre/rating/votes.
library(rvest)   # bug fix: read_html()/html_nodes()/html_text() are rvest functions
webpage <- read_html(url)   # was right-assign: read_html(url) -> webpage
#Using CSS selectors to scrape the rankings section
rank_data_html <- html_nodes(webpage, '.text-primary')
rank_data <- html_text(rank_data_html)
head(rank_data)
#Data preprocessing
rank_data <- as.numeric(rank_data)
head(rank_data)
#Using CSS selectors to scrape the title section
title_data_html <- html_nodes(webpage, '.lister-item-header a')
title_data <- html_text(title_data_html)
head(title_data)
#Using CSS selectors to scrape the description section
description_data_html <- html_nodes(webpage, '.ratings-bar+ .text-muted')
description_data <- html_text(description_data_html)
head(description_data)
#Data-Preprocessing: strip embedded newlines
description_data <- gsub("\n", "", description_data)
head(description_data)
#Using CSS selectors to scrape the Movie runtime section
runtime_data_html <- html_nodes(webpage, '.text-muted .runtime')
runtime_data <- html_text(runtime_data_html)
head(runtime_data)
#Data-Preprocessing: drop the " min" suffix and convert to numeric
runtime_data <- gsub(" min", "", runtime_data)
runtime_data <- as.numeric(runtime_data)
head(runtime_data)
#Using CSS selectors to scrape the Movie genre section
genre_data_html <- html_nodes(webpage, '.genre')
genre_data <- html_text(genre_data_html)
head(genre_data)
#Data-Preprocessing: remove newlines and spaces
genre_data <- gsub("\n", "", genre_data)
genre_data <- gsub(" ", "", genre_data)
#taking only the first genre of each movie
genre_data <- gsub(",.*", "", genre_data)
#Converting each genre from text to factor
genre_data <- as.factor(genre_data)
head(genre_data)
#Using CSS selectors to scrape the IMDB rating section
rating_data_html <- html_nodes(webpage, '.ratings-imdb-rating strong')
rating_data <- html_text(rating_data_html)
head(rating_data)
#Data-Preprocessing:
rating_data <- as.numeric(rating_data)
head(rating_data)
#Using CSS selectors to scrape the votes section
votes_data_html <- html_nodes(webpage, '.sort-num_votes-visible span:nth-child(2)')
votes_data <- html_text(votes_data_html)
head(votes_data)
#Data-Preprocessing: remove thousands separators before numeric conversion
votes_data <- gsub(",", "", votes_data)
votes_data <- as.numeric(votes_data)
head(votes_data)
#Using CSS selectors to scrape the directors section
directors_data_html <- html_nodes(webpage, '.text-muted+ p a:nth-child(1)')
directors_data <- html_text(directors_data_html)
head(directors_data)
#Data-Preprocessing:
directors_data <- as.factor(directors_data)
#Using CSS selectors to scrape the actors section
actors_data_html <- html_nodes(webpage, '.lister-item-content .ghost+ a')
actors_data <- html_text(actors_data_html)
head(actors_data)
#Data-Preprocessing:
actors_data <- as.factor(actors_data)
#Combining all the lists to form a data frame
movies_df <- data.frame(Rank = rank_data, Title = title_data,
                        Description = description_data, Runtime = runtime_data,
                        Genre = genre_data, Rating = rating_data, Votes = votes_data)
#Structure of the data frame
str(movies_df)
#data visualization
library('ggplot2')
qplot(data = movies_df, Runtime, fill = Genre, bins = 30)
ggplot(movies_df, aes(x = Runtime, y = Rating)) +
  geom_point(aes(size = Votes, col = Genre))
|
library(shiny)
library(dygraphs)
library(datasets)
library(xts)
library(plyr)
##---functions----------
## any functions goes here
##---end of functions-------
## Define server logic required to generate and plot a random distribution
# Server entry point. `dataset` is the central reactive: it returns a data
# frame either from a user-uploaded CSV (when the file checkbox is ticked)
# or from the istSOS GetObservation endpoint, with column names normalised.
shinyServer(function(input,output) {
dataset <- reactive({
if(input$file_check==TRUE){#--start of else if for selecting file upload
inFile <- input$file
if (is.null(inFile))
return(NULL)
#data<-read.table(inFile$datapath, sep=" ")
data<- read.csv(inFile$datapath, header = T)
}#--end of else if for selecting file upload
else{
# Build the SOS GetObservation request from the UI selections.
sensor = input$radio#"Node_383"
channels = "acceleration-x,acceleration-y,acceleration-z,air:temperature"
date_start = input$date_start#"2015-03-26"
time_start = input$time_start#"13:16:01"
date_end = input$date_end#"2015-03-26"
time_end = input$time_end#"13:17:15"
# NOTE(review): the "+0100" timezone suffix is hard-coded -- confirm the
# service expects a fixed offset for every request.
url <- paste0("http://quader.igg.tu-berlin.de/istsos/bridgedemoservice?service=SOS&request=GetObservation&offering=temporary&procedure=",sensor,
"&observedProperty=",channels,
"&responseFormat=text/plain&version=1.0.0&eventTime=", date_start,"T",time_start,"+0100/",date_end,"T",time_end,"+0100")
data <- read.csv(file=url, header = TRUE)# read csv file
}
#--renaming for data from istsos: strip the "...1.0." URN prefix from the
# column names; `urn` (the prefix part) is computed but never used.
urn = as.character(lapply(strsplit(names(data), split="1.0."), "[", 1))
colnames = as.character(lapply(strsplit(names(data), split="1.0."), "[", 2))
names(data)<-paste(colnames)
names(data)[2]<- paste("procedure")
data
})
########----data for testing , given by THOMAS bECKER-----------
# Node_383 <- read.csv(file="Node383.csv", header = TRUE)# read csv file
# Node_384 <- read.csv(file="Node384.csv", header = TRUE, skip =15)# read csv file
# Node_573 <- read.csv(file="Node573.csv", header = TRUE, skip =15)# read csv file
# Node_574 <- read.csv(file="Node574.csv", header = TRUE, skip =15)# read csv file
#--datset chooseing--
# dataset <- reactive({
# if(input$radio == "Node_383")
# data <- Node_383
#
# else if(input$radio == "Node_384")
# data <- Node_384
#
# else if(input$radio == "Node_573")
# data <- Node_573
#
# else if(input$radio == "Node_574")
# data <- Node_574
#
# # else if(input$file)
# # fileinput <- input$file
# # #if(is.null(fileinput)){return()}
# # read.table(file=file$datapath)
#
# else("Error selecting a Node")
# })
################----end of datset---------
# Sample-size slider: upper bound tracks the current dataset's row count;
# default is 500 rows (or fewer if the dataset is smaller).
output$datasize <- renderUI({
  n_rows <- nrow(dataset())
  sliderInput('samplesize', 'Data Size',
              min = 10, max = n_rows,
              value = min(500, n_rows),
              step = 50, round = 0)
})
# Echo the selected date range as "<from> to <to>".
output$dateRangeText <- renderText({
  range_txt <- paste(as.character(input$dateRange), collapse = " to ")
  paste("input$dateRange is", range_txt)
})
#-------time conversion----------
################ time stamp conversion for test datasets
# time<- reactive({
#
# timestamp = dataset()[,1]
# date = as.character(lapply(strsplit(as.character(timestamp), split=" "), "[", 1))
# hms = as.character(lapply(strsplit(as.character(timestamp), split=" "), "[", 2))
# h = as.numeric(lapply(strsplit(hms, split=":"), "[", 1))
# m = as.numeric(lapply(strsplit(hms, split=":"), "[", 2))
# s = as.numeric(lapply(strsplit(hms, split=":"), "[", 3))
#
# time_sec = NULL
# for(i in 1:length(timestamp))
# {
# time_sec[i] = h[i]*3600 + m[i]*60 + s[i]
# }
# time_sec
####################
# Reactive: convert the ISO timestamp column (col 1, "YYYY-MM-DDTHH:MM:SS.sss+ZZ")
# into seconds-of-day, shifted by the UTC offset component.
time<- reactive({
  timestamp <- as.character(dataset()[, 1])
  # Part after the "T" separator: "HH:MM:SS.sss+ZZ".
  hms <- vapply(strsplit(timestamp, split = "T"), `[`, character(1), 2)
  h <- as.numeric(vapply(strsplit(hms, split = ":"), `[`, character(1), 1))
  m <- as.numeric(vapply(strsplit(hms, split = ":"), `[`, character(1), 2))
  # Seconds (incl. fraction) cut out by fixed position: strsplit on ":" gave
  # NAs because the seconds field carries the "+ZZ" zone suffix.
  s <- as.numeric(substr(hms, 7, 15))   # characters 7 to 15
  # UTC offset hours, also cut by fixed position -- assumes the "+ZZ" part
  # always starts at character 17 (TODO confirm for all sensors).
  utc_comp <- as.numeric(substr(hms, 17, 18))
  h <- h + utc_comp                     # adding UTC offset to hours value
  # Vectorized seconds-of-day; replaces the original element-by-element loop
  # that grew time_sec one entry at a time.
  h * 3600 + m * 60 + s
})
#----end of time conversion---------
#--------summary tab-----------------
# Summary-tab caption: names the data source (uploaded file vs. sensor node).
output$dataname<- renderText({
  uploaded <- input$file_check == TRUE
  if (uploaded) {
    "summary of dataset from File uploaded:"
  } else {
    paste("summary of dataset", input$radio)
  }
})
# output$choose_plot <- renderUI({
# radioButtons(inputId="choose_plot",
# label="Choose a type of plot for visualization ",
# choices=list(
# "Plot" = "plot",
# "Dygraphs Plot" = "dyplot"),
# selected="plot")
# })
# Raw-data plot: acceleration channels over time. Node_574 exposes only two
# acceleration channels (x, y); every other node has three (x, y, z).
# Fixes vs. original: the comparison used lowercase "node_574" (which never
# matches the "Node_574" value used by every other handler, so the 3-channel
# branch was always taken); `na.rm` was passed to plot() instead of range();
# and plot() was given the invalid `title` argument instead of `main`.
output$plot_raw <- renderPlot({
  t <- time()[1:input$samplesize]
  ch1 <- dataset()$acceleration.x[1:input$samplesize]
  ch2 <- dataset()$acceleration.y[1:input$samplesize]
  if (input$radio == "Node_574") {
    plot(range(t, na.rm = TRUE), range(c(ch1, ch2), na.rm = TRUE), type = 'n',
         xlab = "Time [in seconds]", ylab = "Accelerations",
         main = "Plot of accelerations of Raw data")
    lines(t, ch1, col = "red")
    lines(t, ch2, col = "green")
  } else {
    ch3 <- dataset()$acceleration.z[1:input$samplesize]
    plot(range(t, na.rm = TRUE), range(c(ch1, ch2, ch3), na.rm = TRUE), type = 'n',
         xlab = "Time [in seconds]", ylab = "Accelerations",
         main = "Plot of accelerations of Raw data")
    lines(t, ch1, col = "red")
    lines(t, ch2, col = "green")
    lines(t, ch3, col = "blue")
  }
})
# Channel selector for the raw-data plot / stationarity check. Node_574 has
# data columns 3:5, the other nodes 3:6.
# Fix: the comparison used lowercase "node_574", which never matches the
# "Node_574" value used by every other handler.
output$raw_data <- renderUI({
  cols <- if (input$radio != "Node_574") 3:6 else 3:5
  selectInput("raw_channel",
              "select any data for ploting and Staionarity Check",
              choices = names(dataset()[, cols]))
})
# Plot one selected raw channel against time.
# The original if/else on input$radio had byte-identical branches, so it is
# collapsed into a single path here (no behavior change).
# NOTE(review): `get(input$raw_channel)` resolves the column name through the
# search path and only works because dataset() is attach()ed elsewhere (see
# st_test); consider dataset()[[input$raw_channel]] in a coordinated change.
output$plot_temp <- renderPlot({
  #attach(get(input$radio))
  ch <- get(input$raw_channel)
  title <- paste(input$raw_channel, " plot")
  plot(time()[1:input$samplesize], ch[1:input$samplesize],
       type = "l", col = "red",
       xlab = "Time [in seconds]", ylab = "Data", main = title)
})
# NOTE(review): assigned to a plain local `outpt_text`, not `output$...` --
# this renderText is never wired to the UI; probably meant output$outpt_text.
outpt_text <- renderText({paste("Stationarity Test")})
# Stationarity test of the selected channel over the chosen sample size.
output$st_test <- renderText({
# NOTE(review): attach() is never detach()ed, so the data frame is pushed
# onto the search path again on every invalidation. Other handlers
# (plot_temp, acfInput, ccfInput, regInput, plot_filter) rely on this
# attach for their get() calls, so removing it needs a coordinated change.
attach(dataset())
data<-get(input$raw_channel)
source('stationarity_function.r')
stationarity_function(time()[1:input$samplesize],data[1:input$samplesize])
})
# Dygraphs overview of data columns 3:6. Channels 1-3 share the left y axis,
# channel 4 gets its own right axis ("y2").
# Fix: dropped the trailing empty arguments in dygraph() and dyOptions()
# (`main = "Node383",)` / `"Set1"),)`), which R evaluates as an empty/missing
# argument.
output$plot <- renderDygraph({
  #test <- cbind(test1,test2,test3,test4)
  dygraph(dataset()[,3:6], main = "Node383") %>%
    dyOptions(drawPoints = TRUE, pointSize = 1.3,
              colors = RColorBrewer::brewer.pal(4, "Set1")) %>%
    dyLegend(width= 550)%>%
    dySeries("..1", axis = "y" , label = "Channel 1")%>%
    dySeries("..2", axis = "y" , label = "Channel 2")%>%
    dySeries("..3", axis = "y" , label = "Channel 3")%>%
    dySeries("..4", axis = "y2" , label = "Channel 4")%>%
    dyAxis("y", label = "Channel 1, 2 and 3")%>%
    dyAxis("y2", label = "Channel 4")%>%
    dyRangeSelector()
})
#-------end of summary-------------
#--------data tab----------------
# Data tab: preview table limited to the selected sample size, plus a CSV
# download of the full dataset (file named after the selected node).
output$data <- renderTable({
  head(dataset(), n = input$samplesize)
})
output$data_down <- downloadHandler(
  filename = function() paste0(input$radio, '.csv'),
  content = function(file) {
    write.csv(dataset(), file)
  }
)
#-----end of data------------------
#--------statistic tab----------------
# Statistic tab: caption naming the data source and sample size, plus a
# summary() of the data columns (3:6).
output$samplesize<- renderText({
  if (input$file_check == TRUE) {
    paste(input$samplesize, "observations of dataset from File uploaded:")
  } else {
    paste(input$samplesize, "observations of dataset", input$radio)
  }
})
# Fix: the original summarised a single row (dataset()[input$samplesize, 3:6])
# while the caption above announces `samplesize` observations -- summarise
# rows 1:samplesize instead.
output$stats <- renderPrint({
  summary(dataset()[1:input$samplesize, 3:6])
})
#-----end of statistic------------------
#----similarity tab--------
# Channel selector for the autocorrelation plot (Node_574 lacks one channel).
output$acf_data <- renderUI({
if(input$radio != "Node_574")
selectInput("acf_channel","selecting the channels", choices = names(dataset()[,3:6]))
else
selectInput("acf_channel","selecting the channels", choices = names(dataset()[,3:5]))
})
# ACF of the selected channel; `input$acf_options` picks the type and
# lag.max is length(data)/acf_lag.
# NOTE(review): get() relies on dataset() having been attach()ed (see st_test).
acfInput <- reactive({
#attach(get(input$radio))
data<-get(input$acf_channel)[1:input$samplesize]
ac_res <- acf(data,length(data)/input$acf_lag, type = input$acf_options, na.action = na.pass, col="red") # type should be "correlation", "covariance", "partial"
})
output$plot_acf <- renderPlot({
print(acfInput())
})
# PNG download of the ACF figure.
output$down_acf <- downloadHandler(
filename = "Shinyplot.png",
content = function(file) {
png(file)
plot(acfInput())
dev.off()
})
# First channel for the cross-correlation.
output$ccf_data1 <- renderUI({
if(input$radio != "Node_574")
selectInput("ccf_channel1","selecting the channels", choices = names(dataset()[,3:6]))
else
selectInput("ccf_channel1","selecting the channels", choices = names(dataset()[,3:5]))
})
# Second channel; defaults to column 4 so the two pickers start out different.
output$ccf_data2 <- renderUI({
if(input$radio != "Node_574")
selectInput("ccf_channel2","selecting the channels", choices = names(dataset()[,3:6]), selected = names(dataset()[4]))
else
selectInput("ccf_channel2","selecting the channels", choices = names(dataset()[,3:5]), selected = names(dataset()[4]))
})
# Cross-correlation of the two selected channels via the external helper
# script; data frames pair each channel with the time() vector.
ccfInput <- reactive({
#attach(get(input$radio))
ch1<-get(input$ccf_channel1)
ch2<-get(input$ccf_channel2)
data1= data.frame(time()[1:input$samplesize], ch1[1:input$samplesize])
data2= data.frame(time()[1:input$samplesize], ch2[1:input$samplesize])
source('crossCorr_function.r')
crossCorr_function(data1, data2, input$ccf_lag)
})
output$plot_ccf <- renderPlot({
print(ccfInput())
})
# PNG download of the CCF figure.
output$down_ccf <- downloadHandler(
filename = "Shinyplot.png",
content = function(file) {
png(file)
plot(ccfInput())
dev.off()
})
#-----end of similarity tab----
##----refression tab-------
# Regression model picker (linear/polynomial, exponential growth/decay, S curve).
output$reg <- renderUI({
selectInput("reg","choose a Regression Model", choices = c("Linear and Polynomial"="reg","Exponential Growth"="grow","Exponential Decay"="decay", "S Curve"="scurve"))
})
# Channel selector for the regression tab.
output$reg_data <- renderUI({
if(input$radio != "Node_574")
selectInput("reg_channel","selecting the channels", choices = names(dataset()[,3:6]))
else
selectInput("reg_channel","selecting the channels", choices = names(dataset()[,3:5]))
})
# Coefficient significance test, run when the coef_test button is pressed.
# NOTE(review): coef_test and output$coef_table are re-defined verbatim
# further down in this file; in Shiny the later assignment replaces this one.
coef_test <- eventReactive(input$coef_test, {
#attach(get(input$radio))
data<-get(input$reg_channel)
source('coefficienttesting_function.r')
coefficienttesting_function(time()[1:input$samplesize],data[1:input$samplesize])
})
output$coef_table <- renderTable({
coef_test()
})
# Secondary options for the chosen regression model: a polynomial-degree
# picker for "reg", otherwise a single numeric coefficient input.
# Fixes: ordinal labels ("3nd", "4nd", ..., "10nd") corrected to "3rd",
# "4th", ..., "10th"; the nine copy-pasted choices are now generated.
# Choice VALUES ("LM", "PR2".."PR10") are unchanged.
output$reg_options <- renderUI({
  if (input$reg == "reg") {
    degrees <- 2:10
    ordinals <- paste0(degrees, c("nd", "rd", rep("th", 7)))
    poly_choices <- setNames(paste0("PR", degrees),
                             paste(ordinals, "order polyn Regression"))
    selectInput("reg_options", "Operations",
                c("Linear Regression" = "LM", poly_choices),
                selected = "LM")
  } else if (input$reg == "grow") {
    numericInput("reg_options", "Alpha", 3.5, min = 2, max = 50)
  } else if (input$reg == "decay") {
    numericInput("reg_options", "Alpha", 3.5, min = 2, max = 50)
  } else if (input$reg == "scurve") {
    numericInput("reg_options", "Coefficient a", 3.5, min = 2, max = 50)
  }
})
# Run the selected regression model on the chosen channel.
# The polynomial branches of the original were ten copy-pasted if/else blocks
# differing only in the degree suffix; they are table-driven here. Each
# branch still source()s the matching helper script and calls its function
# with (time, data) over the selected sample size.
# NOTE(review): `get(input$reg_channel)` resolves through the search path and
# relies on dataset() having been attach()ed elsewhere (see st_test).
regInput <- reactive({
  #attach(get(input$radio))
  data <- get(input$reg_channel)
  x <- time()[1:input$samplesize]
  y <- data[1:input$samplesize]
  if (input$reg == "reg") {
    # Option code -> file/function-name suffix for the polynomial helpers.
    poly_suffix <- c(PR2 = "2nd", PR3 = "3rd", PR4 = "4th", PR5 = "5th",
                     PR6 = "6th", PR7 = "7th", PR8 = "8th", PR9 = "9th",
                     PR10 = "10th")
    opt <- as.character(input$reg_options)
    if (opt == "LM") {
      source('linearRegression_function.r')
      linearRegression_function(x, y)
    } else if (opt %in% names(poly_suffix)) {
      fn_name <- paste0("polynRegression", poly_suffix[[opt]], "_function")
      source(paste0(fn_name, '.r'))
      get(fn_name)(x, y)
    }
    # Any other option value falls through to NULL, as in the original chain.
  } else if (input$reg == "grow") {
    source('exponentialGrowth_function.r')
    exponentialGrowth_function(x, y, input$reg_options)
  } else if (input$reg == "decay") {
    source('exponentialDecay_function.r')
    exponentialDecay_function(x, y, input$reg_options)
  } else if (input$reg == "scurve") {
    source('Scurve_function.r')
    Scurve_function(x, y, input$reg_options)
  }
})
# Regression tab output: on-screen plot of the fitted model plus a PNG
# download of the same figure.
output$plot_reg <- renderPlot({
  fit <- regInput()
  print(fit)
})
output$down_reg <- downloadHandler(
  filename = "Shinyplot.png",
  content = function(file) {
    png(file)
    plot(regInput())
    dev.off()
  }
)
# NOTE(review): output$plot_acf and output$down_acf below duplicate the
# definitions in the similarity tab above; the later assignment replaces the
# earlier one, so these are the registrations actually in effect.
output$plot_acf <- renderPlot({
print(acfInput())
})
output$down_acf <- downloadHandler(
filename = "Shinyplot.png",
content = function(file) {
png(file)
plot(acfInput())
dev.off()
})
# NOTE(review): duplicate of the coef_test / output$coef_table pair defined
# in the regression tab above.
coef_test <- eventReactive(input$coef_test, {
#attach(get(input$radio))
data<-get(input$reg_channel)
source('coefficienttesting_function.r')
coefficienttesting_function(time()[1:input$samplesize],data[1:input$samplesize])
})
output$coef_table <- renderTable({
coef_test()
})
# CSV download of the coefficient-test table.
output$coef_down <- downloadHandler(
filename = function() { paste('coef_test.csv', sep='') },
content = function(file) {
write.csv(coef_test(), file)
})
#--------end of test tab
#------filter tab-----
# Channel selector for the filter tab. NOTE(review): unlike the other tabs
# this one offers columns 3:5 / 3:4 (one column fewer) -- confirm intended.
output$filter_data <- renderUI({
if(input$radio != "Node_574")
selectInput("filter_channel","selecting the channels", choices = names(dataset()[,3:5]))
else
selectInput("filter_channel","selecting the channels", choices = names(dataset()[,3:4]))
})
# Secondary parameter for the chosen filter: window Tau for the moving
# average, smoothing value for the spline; none for the (stubbed) Kalman.
output$filter_args <- renderUI({
if(input$filter_options=="MA"){
numericInput("filter_args","Tau", 8 , min=2, max =50)
}
else if(input$filter_options=="SP"){
numericInput("filter_args","Smoothing value", 0.9 , min=2, max =50)
}
})
# Apply the selected filter to the selected channel and plot the result.
# NOTE(review): get() relies on dataset() having been attach()ed (see st_test).
output$plot_filter <- renderPlot({
#attach(get(input$radio))
data<-get(input$filter_channel)
if(input$filter_options=="MA"){
source('movingAverage_function.r')
movingAverage_function(time()[1:input$samplesize],data[1:input$samplesize],input$filter_args)}
else if(input$filter_options=="SP"){
source('Spline_function.r')
Spline_function(time()[1:input$samplesize],data[1:input$samplesize],input$filter_args)}
else if(input$filter_options=="Kalman"){
# Kalman filter not implemented yet.
# source('.r')
# _function(time()[1:input$samplesize],data[1:input$samplesize],input$tau)
}
})
#---end of filter tab-----
##-------info tab--------
#information of how to use the app,
# - what inputs to be given
# - formula for processing and
# - the resultant output
#---------end of info tab--------
})
| /server.r | no_license | akshara-chukkannagari/Gis_Project | R | false | false | 17,624 | r | library(shiny)
library(dygraphs)
library(datasets)
library(xts)
library(plyr)
##---functions----------
## any functions goes here
##---end of functions-------
## Define server logic required to generate and plot a random distribution
shinyServer(function(input,output) {
dataset <- reactive({
if(input$file_check==TRUE){#--start of else if for selecting file upload
inFile <- input$file
if (is.null(inFile))
return(NULL)
#data<-read.table(inFile$datapath, sep=" ")
data<- read.csv(inFile$datapath, header = T)
}#--end of else if for selecting file upload
else{
sensor = input$radio#"Node_383"
channels = "acceleration-x,acceleration-y,acceleration-z,air:temperature"
date_start = input$date_start#"2015-03-26"
time_start = input$time_start#"13:16:01"
date_end = input$date_end#"2015-03-26"
time_end = input$time_end#"13:17:15"
url <- paste0("http://quader.igg.tu-berlin.de/istsos/bridgedemoservice?service=SOS&request=GetObservation&offering=temporary&procedure=",sensor,
"&observedProperty=",channels,
"&responseFormat=text/plain&version=1.0.0&eventTime=", date_start,"T",time_start,"+0100/",date_end,"T",time_end,"+0100")
data <- read.csv(file=url, header = TRUE)# read csv file
}
#--renaming for data from istsos
urn = as.character(lapply(strsplit(names(data), split="1.0."), "[", 1))
colnames = as.character(lapply(strsplit(names(data), split="1.0."), "[", 2))
names(data)<-paste(colnames)
names(data)[2]<- paste("procedure")
data
})
########----data for testing , given by THOMAS bECKER-----------
# Node_383 <- read.csv(file="Node383.csv", header = TRUE)# read csv file
# Node_384 <- read.csv(file="Node384.csv", header = TRUE, skip =15)# read csv file
# Node_573 <- read.csv(file="Node573.csv", header = TRUE, skip =15)# read csv file
# Node_574 <- read.csv(file="Node574.csv", header = TRUE, skip =15)# read csv file
#--datset chooseing--
# dataset <- reactive({
# if(input$radio == "Node_383")
# data <- Node_383
#
# else if(input$radio == "Node_384")
# data <- Node_384
#
# else if(input$radio == "Node_573")
# data <- Node_573
#
# else if(input$radio == "Node_574")
# data <- Node_574
#
# # else if(input$file)
# # fileinput <- input$file
# # #if(is.null(fileinput)){return()}
# # read.table(file=file$datapath)
#
# else("Error selecting a Node")
# })
################----end of datset---------
output$datasize <- renderUI({
sliderInput('samplesize','Data Size', min=10, max=nrow(dataset()),
value=min(500,nrow(dataset())),
step=50,
round=0)
})
output$dateRangeText <- renderText({
paste("input$dateRange is",
paste(as.character(input$dateRange), collapse = " to ")
)
})
#-------time conversion----------
################ time stamp conversion for test datasets
# time<- reactive({
#
# timestamp = dataset()[,1]
# date = as.character(lapply(strsplit(as.character(timestamp), split=" "), "[", 1))
# hms = as.character(lapply(strsplit(as.character(timestamp), split=" "), "[", 2))
# h = as.numeric(lapply(strsplit(hms, split=":"), "[", 1))
# m = as.numeric(lapply(strsplit(hms, split=":"), "[", 2))
# s = as.numeric(lapply(strsplit(hms, split=":"), "[", 3))
#
# time_sec = NULL
# for(i in 1:length(timestamp))
# {
# time_sec[i] = h[i]*3600 + m[i]*60 + s[i]
# }
# time_sec
####################
# Reactive: convert the ISO timestamp column (col 1, "YYYY-MM-DDTHH:MM:SS.sss+ZZ")
# into seconds-of-day, shifted by the UTC offset component.
time<- reactive({
  timestamp <- as.character(dataset()[, 1])
  # Part after the "T" separator: "HH:MM:SS.sss+ZZ".
  hms <- vapply(strsplit(timestamp, split = "T"), `[`, character(1), 2)
  h <- as.numeric(vapply(strsplit(hms, split = ":"), `[`, character(1), 1))
  m <- as.numeric(vapply(strsplit(hms, split = ":"), `[`, character(1), 2))
  # Seconds (incl. fraction) cut out by fixed position: strsplit on ":" gave
  # NAs because the seconds field carries the "+ZZ" zone suffix.
  s <- as.numeric(substr(hms, 7, 15))   # characters 7 to 15
  # UTC offset hours, also cut by fixed position -- assumes the "+ZZ" part
  # always starts at character 17 (TODO confirm for all sensors).
  utc_comp <- as.numeric(substr(hms, 17, 18))
  h <- h + utc_comp                     # adding UTC offset to hours value
  # Vectorized seconds-of-day; replaces the original element-by-element loop
  # that grew time_sec one entry at a time.
  h * 3600 + m * 60 + s
})
#----end of time conversion---------
#--------summary tab-----------------
output$dataname<- renderText({
if(input$file_check==TRUE){
paste("summary of dataset from File uploaded:")}
else{
paste("summary of dataset", input$radio)}
})
# output$choose_plot <- renderUI({
# radioButtons(inputId="choose_plot",
# label="Choose a type of plot for visualization ",
# choices=list(
# "Plot" = "plot",
# "Dygraphs Plot" = "dyplot"),
# selected="plot")
# })
# Raw-data plot: acceleration channels over time. Node_574 exposes only two
# acceleration channels (x, y); every other node has three (x, y, z).
# Fixes vs. original: the comparison used lowercase "node_574" (which never
# matches the "Node_574" value used by every other handler, so the 3-channel
# branch was always taken); `na.rm` was passed to plot() instead of range();
# and plot() was given the invalid `title` argument instead of `main`.
output$plot_raw <- renderPlot({
  t <- time()[1:input$samplesize]
  ch1 <- dataset()$acceleration.x[1:input$samplesize]
  ch2 <- dataset()$acceleration.y[1:input$samplesize]
  if (input$radio == "Node_574") {
    plot(range(t, na.rm = TRUE), range(c(ch1, ch2), na.rm = TRUE), type = 'n',
         xlab = "Time [in seconds]", ylab = "Accelerations",
         main = "Plot of accelerations of Raw data")
    lines(t, ch1, col = "red")
    lines(t, ch2, col = "green")
  } else {
    ch3 <- dataset()$acceleration.z[1:input$samplesize]
    plot(range(t, na.rm = TRUE), range(c(ch1, ch2, ch3), na.rm = TRUE), type = 'n',
         xlab = "Time [in seconds]", ylab = "Accelerations",
         main = "Plot of accelerations of Raw data")
    lines(t, ch1, col = "red")
    lines(t, ch2, col = "green")
    lines(t, ch3, col = "blue")
  }
})
# Channel selector for the raw-data plot / stationarity check. Node_574 has
# data columns 3:5, the other nodes 3:6.
# Fix: the comparison used lowercase "node_574", which never matches the
# "Node_574" value used by every other handler.
output$raw_data <- renderUI({
  cols <- if (input$radio != "Node_574") 3:6 else 3:5
  selectInput("raw_channel",
              "select any data for ploting and Staionarity Check",
              choices = names(dataset()[, cols]))
})
# Plot one selected raw channel against time.
# The original if/else on input$radio had byte-identical branches, so it is
# collapsed into a single path here (no behavior change).
# NOTE(review): `get(input$raw_channel)` resolves the column name through the
# search path and only works because dataset() is attach()ed elsewhere (see
# st_test); consider dataset()[[input$raw_channel]] in a coordinated change.
output$plot_temp <- renderPlot({
  #attach(get(input$radio))
  ch <- get(input$raw_channel)
  title <- paste(input$raw_channel, " plot")
  plot(time()[1:input$samplesize], ch[1:input$samplesize],
       type = "l", col = "red",
       xlab = "Time [in seconds]", ylab = "Data", main = title)
})
outpt_text <- renderText({paste("Stationarity Test")})
output$st_test <- renderText({
attach(dataset())
data<-get(input$raw_channel)
source('stationarity_function.r')
stationarity_function(time()[1:input$samplesize],data[1:input$samplesize])
})
# Dygraphs overview of data columns 3:6. Channels 1-3 share the left y axis,
# channel 4 gets its own right axis ("y2").
# Fix: dropped the trailing empty arguments in dygraph() and dyOptions()
# (`main = "Node383",)` / `"Set1"),)`), which R evaluates as an empty/missing
# argument.
output$plot <- renderDygraph({
  #test <- cbind(test1,test2,test3,test4)
  dygraph(dataset()[,3:6], main = "Node383") %>%
    dyOptions(drawPoints = TRUE, pointSize = 1.3,
              colors = RColorBrewer::brewer.pal(4, "Set1")) %>%
    dyLegend(width= 550)%>%
    dySeries("..1", axis = "y" , label = "Channel 1")%>%
    dySeries("..2", axis = "y" , label = "Channel 2")%>%
    dySeries("..3", axis = "y" , label = "Channel 3")%>%
    dySeries("..4", axis = "y2" , label = "Channel 4")%>%
    dyAxis("y", label = "Channel 1, 2 and 3")%>%
    dyAxis("y2", label = "Channel 4")%>%
    dyRangeSelector()
})
#-------end of summary-------------
#--------data tab----------------
output$data <- renderTable({
head(dataset(),n = input$samplesize)
})
output$data_down <- downloadHandler(
filename = function() { paste(input$radio, '.csv', sep='') },
content = function(file) {
write.csv(dataset(), file)
})
#-----end of data------------------
#--------statistic tab----------------
# Statistic tab: caption naming the data source and sample size, plus a
# summary() of the data columns (3:6).
output$samplesize<- renderText({
  if (input$file_check == TRUE) {
    paste(input$samplesize, "observations of dataset from File uploaded:")
  } else {
    paste(input$samplesize, "observations of dataset", input$radio)
  }
})
# Fix: the original summarised a single row (dataset()[input$samplesize, 3:6])
# while the caption above announces `samplesize` observations -- summarise
# rows 1:samplesize instead.
output$stats <- renderPrint({
  summary(dataset()[1:input$samplesize, 3:6])
})
#-----end of statistic------------------
#----similarity tab--------
output$acf_data <- renderUI({
if(input$radio != "Node_574")
selectInput("acf_channel","selecting the channels", choices = names(dataset()[,3:6]))
else
selectInput("acf_channel","selecting the channels", choices = names(dataset()[,3:5]))
})
acfInput <- reactive({
#attach(get(input$radio))
data<-get(input$acf_channel)[1:input$samplesize]
ac_res <- acf(data,length(data)/input$acf_lag, type = input$acf_options, na.action = na.pass, col="red") # type should be "correlation", "covariance", "partial"
})
output$plot_acf <- renderPlot({
print(acfInput())
})
output$down_acf <- downloadHandler(
filename = "Shinyplot.png",
content = function(file) {
png(file)
plot(acfInput())
dev.off()
})
output$ccf_data1 <- renderUI({
if(input$radio != "Node_574")
selectInput("ccf_channel1","selecting the channels", choices = names(dataset()[,3:6]))
else
selectInput("ccf_channel1","selecting the channels", choices = names(dataset()[,3:5]))
})
output$ccf_data2 <- renderUI({
if(input$radio != "Node_574")
selectInput("ccf_channel2","selecting the channels", choices = names(dataset()[,3:6]), selected = names(dataset()[4]))
else
selectInput("ccf_channel2","selecting the channels", choices = names(dataset()[,3:5]), selected = names(dataset()[4]))
})
ccfInput <- reactive({
#attach(get(input$radio))
ch1<-get(input$ccf_channel1)
ch2<-get(input$ccf_channel2)
data1= data.frame(time()[1:input$samplesize], ch1[1:input$samplesize])
data2= data.frame(time()[1:input$samplesize], ch2[1:input$samplesize])
source('crossCorr_function.r')
crossCorr_function(data1, data2, input$ccf_lag)
})
output$plot_ccf <- renderPlot({
print(ccfInput())
})
output$down_ccf <- downloadHandler(
filename = "Shinyplot.png",
content = function(file) {
png(file)
plot(ccfInput())
dev.off()
})
#-----end of similarity tab----
##----refression tab-------
output$reg <- renderUI({
selectInput("reg","choose a Regression Model", choices = c("Linear and Polynomial"="reg","Exponential Growth"="grow","Exponential Decay"="decay", "S Curve"="scurve"))
})
output$reg_data <- renderUI({
if(input$radio != "Node_574")
selectInput("reg_channel","selecting the channels", choices = names(dataset()[,3:6]))
else
selectInput("reg_channel","selecting the channels", choices = names(dataset()[,3:5]))
})
coef_test <- eventReactive(input$coef_test, {
#attach(get(input$radio))
data<-get(input$reg_channel)
source('coefficienttesting_function.r')
coefficienttesting_function(time()[1:input$samplesize],data[1:input$samplesize])
})
output$coef_table <- renderTable({
coef_test()
})
output$reg_options <- renderUI({
if(input$reg=="reg"){
selectInput("reg_options","Operations",
c("Linear Regression" = "LM",
"2nd order polyn Regression"= "PR2",
"3nd order polyn Regression"= "PR3",
"4nd order polyn Regression"= "PR4",
"5nd order polyn Regression"= "PR5",
"6nd order polyn Regression"= "PR6",
"7nd order polyn Regression"= "PR7",
"8nd order polyn Regression"= "PR8",
"9nd order polyn Regression"= "PR9",
"10nd order polyn Regression"= "PR10"),
selected="LM")
}
else if(input$reg =="grow"){
numericInput("reg_options","Alpha", 3.5 , min=2, max =50)
}
else if(input$reg =="decay"){
numericInput("reg_options","Alpha", 3.5 , min=2, max =50)
}
else if(input$reg =="scurve"){
numericInput("reg_options","Coefficient a", 3.5 , min=2, max =50)
}
})
regInput <- reactive({
#attach(get(input$radio))
data<-get(input$reg_channel)
if(input$reg=="reg"){
if(input$reg_options == "LM"){
source('linearRegression_function.r')
linearRegression_function(time()[1:input$samplesize],data[1:input$samplesize])}
else if(input$reg_options == "PR2"){
source('polynRegression2nd_function.r')
polynRegression2nd_function(time()[1:input$samplesize],data[1:input$samplesize])}
else if(input$reg_options == "PR3"){
source('polynRegression3rd_function.r')
polynRegression3rd_function(time()[1:input$samplesize],data[1:input$samplesize])}
else if(input$reg_options == "PR4"){
source('polynRegression4th_function.r')
polynRegression4th_function(time()[1:input$samplesize],data[1:input$samplesize])}
else if(input$reg_options == "PR5"){
source('polynRegression5th_function.r')
polynRegression5th_function(time()[1:input$samplesize],data[1:input$samplesize])}
else if(input$reg_options == "PR6"){
source('polynRegression6th_function.r')
polynRegression6th_function(time()[1:input$samplesize],data[1:input$samplesize])}
else if(input$reg_options == "PR7"){
source('polynRegression7th_function.r')
polynRegression7th_function(time()[1:input$samplesize],data[1:input$samplesize])}
else if(input$reg_options == "PR8"){
source('polynRegression8th_function.r')
polynRegression8th_function(time()[1:input$samplesize],data[1:input$samplesize])}
else if(input$reg_options == "PR9"){
source('polynRegression9th_function.r')
polynRegression9th_function(time()[1:input$samplesize],data[1:input$samplesize])}
else if(input$reg_options == "PR10"){
source('polynRegression10th_function.r')
polynRegression10th_function(time()[1:input$samplesize],data[1:input$samplesize])}}
else if(input$reg=="grow"){
source('exponentialGrowth_function.r')
exponentialGrowth_function(time()[1:input$samplesize],data[1:input$samplesize], input$reg_options)
}
else if(input$reg=="decay"){
source('exponentialDecay_function.r')
exponentialDecay_function(time()[1:input$samplesize],data[1:input$samplesize], input$reg_options)
}
else if(input$reg=="scurve"){
source('Scurve_function.r')
Scurve_function(time()[1:input$samplesize],data[1:input$samplesize], input$reg_options)
}
})
output$plot_reg <- renderPlot({
print(regInput())
})
output$down_reg <- downloadHandler(
filename = "Shinyplot.png",
content = function(file) {
png(file)
plot(regInput())
dev.off()
})
output$plot_acf <- renderPlot({
print(acfInput())
})
output$down_acf <- downloadHandler(
filename = "Shinyplot.png",
content = function(file) {
png(file)
plot(acfInput())
dev.off()
})
coef_test <- eventReactive(input$coef_test, {
#attach(get(input$radio))
data<-get(input$reg_channel)
source('coefficienttesting_function.r')
coefficienttesting_function(time()[1:input$samplesize],data[1:input$samplesize])
})
output$coef_table <- renderTable({
coef_test()
})
output$coef_down <- downloadHandler(
filename = function() { paste('coef_test.csv', sep='') },
content = function(file) {
write.csv(coef_test(), file)
})
#--------end of test tab
#------filter tab-----
output$filter_data <- renderUI({
if(input$radio != "Node_574")
selectInput("filter_channel","selecting the channels", choices = names(dataset()[,3:5]))
else
selectInput("filter_channel","selecting the channels", choices = names(dataset()[,3:4]))
})
output$filter_args <- renderUI({
if(input$filter_options=="MA"){
numericInput("filter_args","Tau", 8 , min=2, max =50)
}
else if(input$filter_options=="SP"){
numericInput("filter_args","Smoothing value", 0.9 , min=2, max =50)
}
})
output$plot_filter <- renderPlot({
#attach(get(input$radio))
data<-get(input$filter_channel)
if(input$filter_options=="MA"){
source('movingAverage_function.r')
movingAverage_function(time()[1:input$samplesize],data[1:input$samplesize],input$filter_args)}
else if(input$filter_options=="SP"){
source('Spline_function.r')
Spline_function(time()[1:input$samplesize],data[1:input$samplesize],input$filter_args)}
else if(input$filter_options=="Kalman"){
# source('.r')
# _function(time()[1:input$samplesize],data[1:input$samplesize],input$tau)
}
})
#---end of filter tab-----
##-------info tab--------
#information of how to use the app,
# - what inputs to be given
# - formula for processing and
# - the resultant output
#---------end of info tab--------
})
|
###########################################################
#SARS-CoV-2 mutation spectrum
###########################################################
#color and themes
library(RColorBrewer)
# Echoes the 12-colour "Paired" palette (interactive inspection; the hex
# codes are hard-coded into the Self* vectors below).
brewer.pal(12, name="Paired")
# Self1: full 12-colour palette; Self: same slots but everything except the
# C>A pair greyed out; Self3: 6-colour variant.
Self1<-c("#1F78B4","#A6CEE3","#33A02C","#B2DF8A","#E31A1C","#FB9A99","#FF7F00","#FDBF6F","#6A3D9A","#CAB2D6","#B15928","#FFFF99" )
Self<-c("#AAAAAA","#AAAAAA","#AAAAAA","#AAAAAA","#E31A1C","#FB9A99","#AAAAAA","#AAAAAA","#AAAAAA","#AAAAAA","#AAAAAA","#AAAAAA" )
Self3<-c("#1F78B4","#33A02C","#E31A1C","#FF7F00","#6A3D9A","#B15928" )
# Global ggplot theme: white background, no grid, solid panel border.
theme_set(theme_bw()+theme(panel.grid=element_blank(),panel.border=element_rect(size=1,color="black")))
# Shared per-figure theme tweaks (axis text/ticks, facet strips, legend).
# NOTE(review): legend.position = 1 is an unusual value -- confirm intended
# (ggplot2 normally expects "none"/"right"/... or a c(x, y) pair).
my_theme<-theme(axis.line.x=element_line(size=0,color="black"),axis.line.y=element_line(size=0,color="black"),
axis.ticks=element_line(size=0.5,color="black"),axis.ticks.length=unit(0.05,"inches"),
axis.title.x = element_text(size=10),axis.title.y = element_text(size=10),
axis.text.x = element_text(angle = 45,hjust = 1,size=8,color="black"),
axis.text.y = element_text(size=10,color="black"),
strip.text.x = element_text(size=10,face = "bold"),
strip.background = element_rect(color = "black",size=1),
legend.position = 1,
legend.text = element_text(size=10),legend.title = element_text(size=10))
###########################################################
setwd("/Dell/Dell13/shankj/projects/Cov/Plot/20210414/Fig1/")
###########################################################
#SARS-CoV-2 mutations on the negative strand
###########################################################
#1.parameter's cut-off
# Load the per-UMI SNP calls (fread from data.table; loaded outside this
# chunk) and the per-position mismatch counts, then merge them by position.
Ctrl<-as.data.frame(fread("/Dell/Dell13/shankj/projects/Cov/Plot/20210414/Fig1/Vero_Total_Barcode_SNP.csv",stringsAsFactors = F))
Gaussi<-read.table("/Dell/Dell13/shankj/projects/Cov/Plot/20210414/Fig1/Guassi.txt",sep = "\t",header = F)
head(Ctrl)
head(Gaussi)
colnames(Gaussi)<-c("Pos","Mismatch_Number","Total_Number")
# Per-position mismatch fraction.
Gaussi$Frac<-Gaussi$Mismatch_Number/Gaussi$Total_Number
Ctrl<-merge(Ctrl,Gaussi,by=c("Pos"))
# Persist the merged table for reuse (re-loadable via the commented fread below).
write.csv(Ctrl,"/Dell/Dell13/shankj/projects/Cov/Plot/20210414/Fig1/Vero_Total_Barcode_SNP_Mismatch.csv",row.names = F,quote = F)
#Ctrl<-as.data.frame(fread("/Dell/Dell13/shankj/projects/Cov/Plot/20210414/Fig1/Vero_Total_Barcode_SNP_Mismatch.csv",stringsAsFactors = F))
#FigS2A
# FigS2A: raw mutation spectrum with NO quality cut-offs, excluding only
# known strain differences.
# NOTE(review): bare ggplot() calls autoprint only in interactive sessions;
# wrap in print() if this script is ever run via source()/Rscript.
Filter<-filter(Ctrl,
!Pos %in% c(4402,5062,8782,28144),#The four known polymorphism sites, which were different between our SARS-CoV-2 reference genome and BetaCoV/Korea/KCDC03/2020
!UMI %in% c("26257-26283")) #Known indel in BetaCoV/Korea/KCDC03/2020
head(Filter)
# Mutation count per SNP class (e.g. "C > T").
plot<-as.data.frame(Filter) %>% group_by(SNP) %>% dplyr::summarise(count=n())
max(plot$count)
pdf("Control_noCutoff_1.pdf",width = 3,height = 3)
# Fixed SNP order: transitions first, then transversions.
plot$SNP<-factor(plot$SNP,levels = c("C > T","G > A","A > G","T > C","G > T","C > A","G > C","C > G","A > C","T > G","A > T","T > A"))
ggplot(data=plot, aes(x=factor(SNP),y=count,fill=SNP)) +
geom_bar(position="dodge", stat="identity",color="black",width = 0.8)+
scale_fill_manual(values = Self1)+
labs(x='',y='Mutation count')+
scale_y_continuous(breaks = seq(0,24000,6000),limits = c(0,24000))+
my_theme
dev.off()
#FigS2B
# FigS2B: spectrum after UMI-consensus cut-offs C1+C2:
#  - >=2 independent (non-PCR-duplicate) reads supporting the alt allele
#  - 0 reference-supporting reads within the same UMI family
Filter<-filter(Ctrl,
!Pos %in% c(4402,5062,8782,28144),
UMI_Alt_no_PCR_reads>=2,
UMI_ref_reads==0,
!UMI %in% c("26257-26283")) #,
#UMI_Alt_no_PCR_reads==1
head(Filter)
plot<-as.data.frame(Filter) %>% group_by(SNP) %>% dplyr::summarise(count=n())
max(plot$count)
pdf("Control_Cutoff_C1and2.pdf",width = 3,height = 3)
plot$SNP<-factor(plot$SNP,levels = c("C > T","G > A","A > G","T > C","G > T","C > A","G > C","C > G","A > C","T > G","A > T","T > A"))
ggplot(data=plot, aes(x=factor(SNP),y=count,fill=SNP)) +
geom_bar(position="dodge", stat="identity",color="black",width = 0.8)+
scale_fill_manual(values = Self1)+
labs(x='',y='Mutation count')+ my_theme
#my_theme
dev.off()
#FigS2C
# FigS2C: spectrum after cut-offs C1+C2+C3, where C3 requires the call to be
# >=15 nt away from a junction site (Dis).
Filter<-filter(Ctrl,
!Pos %in% c(4402,5062,8782,28144),
Dis >= 15,
UMI_Alt_no_PCR_reads>=2,
UMI_ref_reads==0,
!UMI %in% c("26257-26283")) #,
#UMI_Alt_no_PCR_reads==1
head(Filter)
plot<-as.data.frame(Filter) %>% group_by(SNP) %>% dplyr::summarise(count=n())
max(plot$count)
pdf("Control_Cutoff_C1and2and3.pdf",width = 3,height = 3)
plot$SNP<-factor(plot$SNP,levels = c("C > T","G > A","A > G","T > C","G > T","C > A","G > C","C > G","A > C","T > G","A > T","T > A"))
ggplot(data=plot, aes(x=factor(SNP),y=count,fill=SNP)) +
geom_bar(position="dodge", stat="identity",color="black",width = 0.8)+
scale_fill_manual(values = Self1)+scale_y_continuous(limits = c(0,250))+
labs(x='',y='Mutation count')+ my_theme
#my_theme
dev.off()
###########################################################
#To discard potential polymorphisms
#cut-off 0.2%
###########################################################
#FigS2D
#cut-off 0.2%
# Re-read the per-position mismatch table (same file as loaded above).
Gaussi<-read.table("/Dell/Dell13/shankj/projects/Cov/Plot/20210414/Fig1/Guassi.txt",sep = "\t",header = F)
head(Gaussi)
colnames(Gaussi)<-c("Pos","Mismatch_Number","Total_Number")
Gaussi$Frac<-Gaussi$Mismatch_Number/Gaussi$Total_Number
# Quick look at the raw density of log10 mismatch fractions.
ggplot(Gaussi) +
#geom_vline(xintercept = log2(20),color="red",linetype="dotted")+
labs(x='log10(mismatch read number/total read number)')+
#scale_x_continuous(limits = c(0,0.01)) +
geom_density(aes(x =log10(Frac), y =..count..))
# Drop zero and near-1 fractions before fitting (log10(0) would be -Inf).
Gaussi<-filter(Gaussi, Frac<0.9,Frac>0)
library(mixtools)
# Fit a 2-component Gaussian mixture to separate sequencing-error background
# from true polymorphisms. NOTE(review): normalmixEM uses random starting
# values -- call set.seed() beforehand for reproducible parameters.
mid<-mixtools::normalmixEM(log10(Gaussi$Frac), arbvar = T, epsilon = 1e-03)
mid$lambda
mid$mu
mid$sigma
# Mass of each component below the 0.2% cut-off.
# NOTE(review): the mean/sd/lambda values are hard-coded from a previous EM
# run and may not match the fit above -- regenerate from mid$ after rerun.
pnorm(mean = -2.423157,sd=0.5649736,lower.tail = T,q=log10(0.002))*0.06368319
pnorm(mean = -3.044717 ,sd=0.2420202,lower.tail = T,q=log10(0.002))*0.93631681
0.01991428/(0.01991428+0.8646311)
ggplot(Gaussi) +
labs(x='log10(mismatch read number/total read number)')+
geom_density(aes(x =log10(Frac)))+my_theme
pdf("/Dell/Dell13/shankj/projects/Cov/Plot/20210609/FigS3_E-2.pdf",width = 3,height = 3)
# Histogram with the two fitted components overlaid. NOTE(review):
# plot_mix_comps() is not defined in this file -- assumed defined earlier in
# the session (standard mixture-density helper); confirm.
data.frame(x = mid$x) %>%
ggplot() +
geom_histogram(aes(x, y =..density..), binwidth = .2, colour = "grey30", fill = "grey", lwd = .5) +
stat_function(geom = "line", fun = plot_mix_comps, # here is the function
args = list(mid$mu[1], mid$sigma[1], lam = mid$lambda[1]),
colour = "red", lwd = 1) +
stat_function(geom = "line", fun = plot_mix_comps, # and here again because k = 2
args = list(mid$mu[2], mid$sigma[2], lam = mid$lambda[2]),
colour = "blue", lwd = 1) +
labs(x='log10(mismatch read number/total read number)',y="Density")+ my_theme+#2+guides(fill=F)+
geom_vline(xintercept=log10(0.002),col="black",linetype="dashed",lwd=1)
dev.off()
head(Ctrl)
###########################################################
#De novo mutations in Vero
#Fig1F
###########################################################
# Final Vero call set: all three UMI cut-offs plus the 0.2% mismatch-fraction
# cut-off derived from the mixture model above.
Filter<-filter(Ctrl,
!Pos %in% c(4402,5062,8782,28144),
Dis >= 15,
UMI_Alt_no_PCR_reads>=2,
UMI_ref_reads==0,
All_Alt_reads/Total_Number <= 0.002,
!UMI %in% c("26257-26283")) #,
#UMI_Alt_no_PCR_reads==1
head(Filter)
# Positions of C>T calls, kept for downstream use.
CT_Pos<-filter(Filter,SNP=="C > T")$Pos
#mutation spectrum
plot<-as.data.frame(Filter) %>% group_by(SNP) %>% dplyr::summarise(count=n())
sum(plot$count)
pdf("Fig1F.pdf",width = 3,height = 3)
plot$SNP<-factor(plot$SNP,levels = c("C > T","G > A","A > G","T > C","G > T","C > A","G > C","C > G","A > C","T > G","A > T","T > A"))
ggplot(data=plot, aes(x=factor(SNP),y=count,fill=SNP)) +
geom_bar(position="dodge", stat="identity",color="black",width = 0.8)+
scale_fill_manual(values = Self1)+
labs(x='',y='Mutation count')+ my_theme
#my_theme
dev.off()
#Count
# Hard-coded per-class mutation counts, presumably read off the Fig1F
# spectrum above -- TODO confirm they match the current data.
ts<-sum(51,15,35,21)
tv<-sum(8,4,6,6,9,24,11,7)
ts+tv
# Are transitions enriched over the 4/12 random expectation?
binom.test(ts,(ts+tv),4/12)
# Is the largest transversion class (24) enriched among the 8 classes?
binom.test(24,tv,1/8)
#Freq
#Coverage_consensus_reads.txt
#Base count in Consensus reads
#A 3631127
#T 3212571
#C 2354397
#G 2175628
# Per-class mutation frequencies (hard-coded -- TODO document provenance).
ts<-c(9.65E-06,2.17E-05,6.90E-06,6.55E-06)
tv<-c(2.21E-06,1.10E-06,2.55E-06,2.55E-06,4.14E-06,1.10E-05,3.43E-06,2.18E-06)
t.test(log10(ts),log10(tv))
wilcox.test(c(9.65E-06,2.17E-05,6.90E-06,6.55E-06),
c(2.21E-06,1.10E-06,2.55E-06,2.55E-06,4.14E-06,1.10E-05,3.43E-06,2.18E-06))
t.test(c(2.21E-06,1.10E-06,2.55E-06,2.55E-06,4.14E-06,1.10E-05,3.43E-06,2.18E-06), mu = 1.10E-05)
#C>U VS G>A
# NOTE(review): 2185628 disagrees with the G base count 2175628 listed above
# -- one of the two is likely a typo; confirm before trusting these p-values.
fisher.test(matrix(c(51,15,2354397,2185628),nrow = 2))
# G>U VS C>A
fisher.test(matrix(c(24,6,2185628,2354397),nrow = 2))
#Fisher exact test, only use the sites covered by junction read pairs
fisher.test(matrix(c(22,6,(5863-22),(5492-6)),nrow=2))
#Potential_mutation_site.txt
# Var1 Freq
# A 8929
# C 5492
# G 5863
# T 9594
###########################################################
#Consensus VS Unconsensus
#FigS3A
###########################################################
#mismatch frequency
#Consensus reads
# Mismatch frequency as a function of consensus-read depth ("Aver").
Consensus<-as.data.frame(fread("/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/ConsensusVSunconsensus/Consensus.result2",stringsAsFactors = F,header = F))
colnames(Consensus)<-c("Aver","SNP","Cover","SNP_Freq")
head(Consensus)
##Non-consensus reads
# Mismatch frequency as a function of base quality ("BQ").
Inconsensuse<-as.data.frame(fread("/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/ConsensusVSunconsensus/Unconsensus.result2",stringsAsFactors = F,header = F))
colnames(Inconsensuse)<-c("BQ","SNP","Cover","SNP_Freq")
head(Inconsensuse)
pdf("ConsensusVSInconsensue_Mismatch_Freq.pdf",height = 3,width = 3,useDingbats = F)
# Black: consensus reads; red: non-consensus reads.
ggplot()+
geom_point(data=Consensus,aes(x=Aver,y=log10(SNP_Freq)),col="black")+
geom_point(data=Inconsensuse,aes(x=BQ,y=log10(SNP_Freq)),col="red")+
scale_y_continuous(breaks = seq(-5,0,1),limits = c(-5,0))+
my_theme
dev.off()
###################################################################
#Small indel
#FigS3D-E
###################################################################
# Per-UMI indel calls; columns are defined by the upstream JCR pipeline.
test<-read.table("/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/Vero_JCR_default/JCR_indel/Polymorphism_Consensuse.Indel.txt",header = F,sep="\t")
colnames(test)<-c("Fraction","Barcode","Barcode_All_Read_pair_number","Covered_reads_number","Covered_readpair_number","Pos","Indel","Alt","Indel_len","Dis","Reads_Pos","Reads","Reads_PCR","non_PCR_number","SNP_read_number")
# Keep calls below the 0.2% polymorphism cut-off and >=15 nt from a junction.
Filter<-filter(test,Fraction<=0.002,abs(Dis)>=15)
# Collapse to unique indel events (one row per Pos/Indel/Alt/length).
# BUG FIX: Count must be computed BEFORE it is written out; the original
# called write.csv(Count, ...) one line before Count was created, which
# errors (or silently writes a stale object from a previous run).
Count<-Filter %>% group_by(Pos,Indel,Alt,Indel_len) %>% dplyr::summarise(count=n())
write.csv(Count,"/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/Vero_JCR_default/JCR_indel/Indel.csv",row.names = F,quote = F)
nrow(Count)
# FigS3D: length distribution of singleton indels.
# NOTE(review): my_theme1/my_theme2 are not defined in this file -- assumed
# defined earlier in the session; confirm.
ggplot()+
geom_histogram(data=filter(Count,count==1),aes(x=Indel_len),binwidth=1)+
labs(x='Indel length(Insertion:+ \t Deletion:-)')+
my_theme1
#Distance
# FigS3E: distance of indel calls to the nearest junction site.
ggplot()+
geom_histogram(data=Filter,aes(x=Dis),binwidth=1)+
scale_x_continuous(limits = c(-260,170)) +
labs(x='Distance to junction site')+#my_theme1
my_theme2+guides(fill=F)
#read distance
as.vector(test$Reads_Pos)
# Flatten the comma-separated read-position strings into one numeric column;
# map()/str_split() come from purrr/stringr (assumed attached).
mid<-as.data.frame(na.omit(as.numeric(unlist(map(test$Reads_Pos,~str_split(.,','))))))
colnames(mid)<-"ReadPosition"
ggplot()+
geom_histogram(data=mid,aes(x=ReadPosition),binwidth=1)+
#scale_x_continuous(limits = c(0,220)) +
labs(x='Read Position')+my_theme
###################################################################
#A549
###################################################################
# Per-sample mutation-call and mismatch files for the A549 replicates.
tmp <- list.files(path = "/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/Read2/",pattern = "*.UMI.mutation.countPvalue.csv")
tmp2 <- list.files(path = "/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/Read2/",pattern = "*.mismatch")
Filter<-data.frame()
# NOTE(review): hard-coded 1:3 assumes exactly three replicates, and pairing
# tmp[i] with tmp2[i] relies on both directory listings sorting identically
# -- confirm. rbind-in-loop is fine here given only 3 iterations.
for (i in 1:3) {
Mismatch<-read.table(paste0("/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/Read2/",tmp2[i]),header = T,stringsAsFactors = F)
colnames(Mismatch)<-c("Pos","Mismatch_Number","Total_Number")
Mutation<-read.csv(paste0("/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/Read2/",tmp[i]),header = T,stringsAsFactors = F)
name=sub(".UMI.mutation.countPvalue.csv","",tmp[i])
#Mutation$Qvalue<-qvalue(Mutation$Pvalue)$qvalue
# Annotate each call with its positional mismatch background and sample name.
Mid<-merge(Mutation,Mismatch,by=c("Pos"))
Mid$sample<-rep(name,nrow(Mid))
Filter<-rbind(Filter,Mid)
rm(Mid)
rm(Mutation)
}
#Filter$Qvalue<-qvalue(Filter$Pvalue)$qvalue
head(Filter)
nrow(Filter)
write.csv(Filter,"A549.csv",row.names = F,quote = F)
#C8782T,T28144C,C18060T
# Apply the consensus cut-offs and drop the three known strain polymorphisms
# listed above.
Mid<-filter(Filter,Dis >= 15,
UMI_Alt_no_PCR_reads>=2,
UMI_ref_reads==0,
!Pos %in% c(18060,8782,28144)) #Dis >= 15, Count <=5,UMI_Alt_Fraction==1
# BUG FIX: head(Mid) previously ran BEFORE this filter, at a point where Mid
# had just been rm()'d inside the loop above, raising
# "object 'Mid' not found". Inspect it after it is (re)created.
head(Mid)
# NOTE(review): output filename has no .csv extension -- confirm intended.
write.csv(Mid,"A549_Filter",row.names = F,quote = F)
plot<-as.data.frame(Mid %>% group_by(SNP) %>% dplyr::summarise(count=n()))
sum(plot$count)
# Enrichment tests (counts hard-coded from the spectrum -- TODO confirm
# against the current plot data).
binom.test(29,sum(5+12+11+1+4+29+11+10),1/8)
binom.test(65,sum(65+14+5+8),1/4)
pdf("A549_1.pdf",width = 3,height = 3)
plot$SNP<-factor(plot$SNP,levels = c("C > T","G > A","A > G","T > C","G > T","C > A","G > C","C > G","A > C","T > G","A > T","T > A"))
ggplot(data=plot, aes(x=factor(SNP),y=count,fill=SNP)) +
geom_bar(position="dodge", stat="identity",color="black",width = 0.8)+
scale_fill_manual(values = Self1)+scale_y_continuous(limits = c(0,80)) +
labs(x='',y='Mutation count')+ my_theme2+guides(fill=F)
dev.off()
# Base composition of sites covered by junction read pairs (A549).
# A 8637
# C 5338
# G 5677
# T 9253
# G>T vs C>A enrichment at junction-covered sites.
fisher.test(matrix(c(29,11,(5677-29),(5338-11)),nrow=2))
#mutation rate
# Per-replicate consensus base counts used as denominators below.
#C 260205+283168+273443
#A 378420+410749+394759
#G 239705+262490+254775
#T 293026+316413+303995
#
# G>T vs C>A normalised by G/C base content.
fisher.test(matrix(c(29,11,(239705+262490+254775),(260205+283168+273443)),nrow=2))
# C>T vs G>A normalised by C/G base content.
fisher.test(matrix(c(65,5,(260205+283168+273443),(239705+262490+254775)),nrow=2))
# Summary table comparing Vero mutation counts/rates with A549 and a public
# (Sci. Adv.) dataset; one row per SNP class.
Mutation<-read.table("/Dell/Dell13/shankj/projects/Cov/Plot/20210414/Fig1/A549.txt",header = T,sep="\t")
head(Mutation)
Mutation$SNP<-factor(Mutation$SNP,levels = c("C > T","G > A","A > G","T > C","G > T","C > A","G > C","C > G","A > C","T > G","A > T","T > A"))
# Vero counts vs Sci. Adv. counts, coloured by SNP class.
pdf("/Dell/Dell13/shankj/projects/Cov/Plot/20210519/SciAdv_VS_Vero_rate.pdf",height = 3,width = 3,useDingbats = F)
ggplot(data=Mutation ,aes(x=SCV2_count,y=SciAdv,col=SNP,size=3) )+ #y=log10(as.numeric(Error_Fraction)+10^(-11))
geom_point()+scale_y_continuous(limits = c(0,300)) +scale_x_continuous(limits = c(0,52)) +
scale_color_manual(values = Self1)+
labs(x='SARS-CoV-2 mutation rate in Vero',y='SARS-CoV-2 mutation from Sci. Adv.') +
my_theme2+guides(col=F,size=F)
dev.off()
# Spearman (rank) correlation.
cor.test(Mutation$SCV2_count,Mutation$SciAdv,method = "s")
# Exploratory (not saved to file): A549 counts vs polymorphism counts.
ggplot(data=Mutation,aes(x=A549,y=SARS_CoV2_Polymorphism,col=SNP,size=3) )+ #y=log10(as.numeric(Error_Fraction)+10^(-11))
geom_point()+#scale_y_continuous(limits = c(-6,-4)) +scale_x_continuous(limits = c(-6,-4.5)) +
scale_color_manual(values = Self1)+
labs(x='# of SARS-CoV-2 mutation rate from A549',y='# of SARS-CoV-2 polymorphisms') +
my_theme2+guides(col=F,size=F)
cor.test(log10(Mutation$SCV2_count),log10(Mutation$A549))
# Exploratory (not saved to file): Vero rate vs A549 rate.
ggplot(data=Mutation ,aes(x=SCV2_Rate,y=A549_Rate,col=SNP,size=3) )+ #y=log10(as.numeric(Error_Fraction)+10^(-11))
geom_point()+
scale_color_manual(values = Self1)+
scale_x_continuous(limits = c(0,2.5*10^-5)) +
labs(x='log10(SARS-CoV-2 mutation rate from Vero)',y='log10(SARS-CoV-2 mutation rate from A549)') +
my_theme2+guides(col=F,size=F)
cor.test(log10(Mutation$SCV2_Rate),log10(Mutation$A549_Rate))
# Vero counts vs A549 counts.
pdf("/Dell/Dell13/shankj/projects/Cov/Plot/20210519/A549_VS_Vero_count.pdf",height = 3,width = 3,useDingbats = F)
ggplot(data=Mutation ,aes(x=SCV2_count,y=A549,col=SNP,size=3) )+ #y=log10(as.numeric(Error_Fraction)+10^(-11))
geom_point()+scale_y_continuous(limits = c(0,80)) +scale_x_continuous(limits = c(0,52)) +
scale_color_manual(values = Self1)+
labs(x='SARS-CoV-2 mutation count from Vero',y='SARS-CoV-2 mutation count from A549') +
my_theme2+guides(col=F,size=F)
dev.off()
cor.test(log10(Mutation$SCV2_count),log10(Mutation$A549))
# Per-class A549 mutation rate (scaled by 1e5 for readability).
pdf("/Dell/Dell13/shankj/projects/Cov/Plot/20210519/A549_rate.pdf",height = 3,width = 3,useDingbats = F)
ggplot(data=Mutation, aes(x=factor(SNP),y=A549_Rate*10^5,fill=SNP)) +
geom_bar(position="dodge", stat="identity",color="black",width = 0.8)+
scale_fill_manual(values = Self1)+#scale_y_continuous(limits = c(0,25)) +
labs(x='',y='Mutation rate (x10^-5)')+
my_theme2+guides(fill=F)
dev.off()
# One-sample t-tests against hard-coded reference rates -- TODO document
# where the mu values come from.
t.test(c(4.22323e-06,1.01358e-05,1.34669e-05,1.22427e-06,1.20425e-05,1.09477e-05), mu = 3.83106e-05)
t.test(c(1.18250e-05,6.60528e-06,8.75816e-06), mu = 7.95773e-05)
# Fig2D-1: Vero counts vs polymorphism counts (log-log).
pdf("/Dell/Dell13/shankj/projects/Cov/Plot/20210712/20210716/Fig2D-1.pdf",height = 3,width = 3,useDingbats = F)
ggplot(data=Mutation ,aes(x=log10(SCV2_count),y=log10(SARS_CoV2_Polymorphism),col=SNP,size=3) )+ #y=log10(as.numeric(Error_Fraction)+10^(-11))
geom_point()+scale_y_continuous(limits = c(1,3.5)) +scale_x_continuous(limits = c(0.5,2)) +
scale_color_manual(values = Self1)+
labs(x='SARS-CoV-2 mutation in Vero',y='SARS-CoV-2 mutation polymorphism')# +
#my_theme2+guides(col=F,size=F)
dev.off()
cor.test(log10(Mutation$SCV2_count),log10(Mutation$SARS_CoV2_Polymorphism))
############################################################################################
#Fig1C
############################################################################################
# Mismatch rate around junction sites in consensus reads.
Junction<-as.data.frame(fread("/Dell/Dell13/shankj/projects/Cov/Plot/20210609/Junction_Site_mismatch_consensus_reads.txt",stringsAsFactors = F))
pdf("/Dell/Dell13/shankj/projects/Cov/Plot/20210609/Consensus_Mismatch.pdf",height = 3,width = 3,useDingbats = F)
colnames(Junction)<-c("Dis","Mis","Cover")
# NOTE(review): y is the FRACTION Mis/Cover (limits 0-0.025), but the axis
# label says "Mismatch number" -- confirm which is intended.
ggplot(data=Junction, aes(x=(Dis),y=Mis/Cover)) + #geom_abline(intercept=0.5, slope=0,color="red")+
geom_bar(position="dodge", stat="identity",width = 0.5)+
#theme_classic()+theme(panel.background=element_rect(colour='black'))+
labs(x='Position(Junction site=0) ',y='Mismatch number')+#barplot_theme +
#theme(axis.text.x = element_text(angle = 45, hjust = 0.5, vjust = 0.5))+
scale_x_continuous(breaks = seq(-30,30,15),limits = c(-30,30))+
scale_y_continuous(limits = c(0,0.025))+my_theme2
dev.off()
###############################################################################################
#FigS1C
###############################################################################################
# STAR SJ.out.tab splice-junction table for SARS-CoV-2.
Junction<-read.table("/Dell/Dell13/shankj/projects/Cov/SCV2SJ.out.tab",sep="\t",header = F,stringsAsFactors = F)
head(Junction)
colnames(Junction)<-c("Chr","start","end","strand","intron_motif","annotated","uniquely_mapped","multiple_mapped","Overhang")
#Junction2<-separate(Junction,UMI,into = c('start','end'),sep = '-')[,1:3]
# Total read support per junction (unique + multi-mapped).
Junction$sum<-Junction$uniquely_mapped+Junction$multiple_mapped
nrow(filter(Junction)) # no-op filter; equivalent to nrow(Junction)
# Cap read support at 2^5 so the log2 colour scale is not dominated by a few
# very deep junctions; vectorized pmin() replaces the original per-row loop
# over column 10 (the "sum" column) with identical results.
cutoff<-2^5
Junction$sum<-pmin(Junction$sum,cutoff)
# BUG FIX: the original opened three graphics devices in a row (pdf + two
# jpeg) but called dev.off() only once, leaking two open devices and leaving
# the pdf and first jpeg empty/corrupt. Open exactly one device per figure;
# uncomment an alternative (with its own plot + dev.off()) to regenerate the
# other formats.
#pdf("FigS1C.pdf",height = 4,width = 5.5)
#jpeg("FigS1C.jpg",units = "cm",height = 8, width = 11,res = 300)
jpeg("/Dell/Dell13/shankj/projects/Cov/Plot/20210702/FigS1C_nolegend.jpg",units = "cm",height = 8, width = 8,res = 300)
# Junction start/end scatter; the canonical 3108-28158 junction highlighted
# in red.
ggplot() +
geom_point(data=Junction, aes(x=as.numeric(start),y=-as.numeric(end),color=log2(sum)),shape=15,size=0.2)+
geom_point(data=filter(Junction,start==3108,end==28158),aes(x=as.numeric(start),y=-as.numeric(end)),color="red",size=2,shape=23)+
scale_color_continuous(type = "viridis")+my_theme2+
ylab("End")+xlab("Start")+guides(col=F)
dev.off()
# Distribution of (capped) junction support levels.
mid<-Junction %>% group_by(sum) %>% dplyr::summarise(count=n())
sum(filter(mid,count<=20)$count)
| /Plot/Script/Fig1.R | no_license | kjshan/SARS-CoV-2-Mutation-Spectrum | R | false | false | 19,936 | r | ###########################################################
#SARS-CoV-2 mutation spectrum
###########################################################
#color and themes
library(RColorBrewer)
brewer.pal(12, name="Paired")
Self1<-c("#1F78B4","#A6CEE3","#33A02C","#B2DF8A","#E31A1C","#FB9A99","#FF7F00","#FDBF6F","#6A3D9A","#CAB2D6","#B15928","#FFFF99" )
Self<-c("#AAAAAA","#AAAAAA","#AAAAAA","#AAAAAA","#E31A1C","#FB9A99","#AAAAAA","#AAAAAA","#AAAAAA","#AAAAAA","#AAAAAA","#AAAAAA" )
Self3<-c("#1F78B4","#33A02C","#E31A1C","#FF7F00","#6A3D9A","#B15928" )
theme_set(theme_bw()+theme(panel.grid=element_blank(),panel.border=element_rect(size=1,color="black")))
my_theme<-theme(axis.line.x=element_line(size=0,color="black"),axis.line.y=element_line(size=0,color="black"),
axis.ticks=element_line(size=0.5,color="black"),axis.ticks.length=unit(0.05,"inches"),
axis.title.x = element_text(size=10),axis.title.y = element_text(size=10),
axis.text.x = element_text(angle = 45,hjust = 1,size=8,color="black"),
axis.text.y = element_text(size=10,color="black"),
strip.text.x = element_text(size=10,face = "bold"),
strip.background = element_rect(color = "black",size=1),
legend.position = 1,
legend.text = element_text(size=10),legend.title = element_text(size=10))
###########################################################
setwd("/Dell/Dell13/shankj/projects/Cov/Plot/20210414/Fig1/")
###########################################################
#SARS-CoV-2 mutation the in negative strand
###########################################################
#1.parameter's cut-off
Ctrl<-as.data.frame(fread("/Dell/Dell13/shankj/projects/Cov/Plot/20210414/Fig1/Vero_Total_Barcode_SNP.csv",stringsAsFactors = F))
Gaussi<-read.table("/Dell/Dell13/shankj/projects/Cov/Plot/20210414/Fig1/Guassi.txt",sep = "\t",header = F)
head(Ctrl)
head(Gaussi)
colnames(Gaussi)<-c("Pos","Mismatch_Number","Total_Number")
Gaussi$Frac<-Gaussi$Mismatch_Number/Gaussi$Total_Number
Ctrl<-merge(Ctrl,Gaussi,by=c("Pos"))
write.csv(Ctrl,"/Dell/Dell13/shankj/projects/Cov/Plot/20210414/Fig1/Vero_Total_Barcode_SNP_Mismatch.csv",row.names = F,quote = F)
#Ctrl<-as.data.frame(fread("/Dell/Dell13/shankj/projects/Cov/Plot/20210414/Fig1/Vero_Total_Barcode_SNP_Mismatch.csv",stringsAsFactors = F))
#FigS2A
Filter<-filter(Ctrl,
!Pos %in% c(4402,5062,8782,28144),#The four konwn polymorphisms sites, which were different between our SARS-CoV-2 reference genome and BetaCoV/Korea/KCDC03/2020
!UMI %in% c("26257-26283")) #Known indel in BetaCoV/Korea/KCDC03/2020
head(Filter)
plot<-as.data.frame(Filter) %>% group_by(SNP) %>% dplyr::summarise(count=n())
max(plot$count)
pdf("Control_noCutoff_1.pdf",width = 3,height = 3)
plot$SNP<-factor(plot$SNP,levels = c("C > T","G > A","A > G","T > C","G > T","C > A","G > C","C > G","A > C","T > G","A > T","T > A"))
ggplot(data=plot, aes(x=factor(SNP),y=count,fill=SNP)) +
geom_bar(position="dodge", stat="identity",color="black",width = 0.8)+
scale_fill_manual(values = Self1)+
labs(x='',y='Mutation count')+
scale_y_continuous(breaks = seq(0,24000,6000),limits = c(0,24000))+
my_theme
dev.off()
#FigS2B
Filter<-filter(Ctrl,
!Pos %in% c(4402,5062,8782,28144),
UMI_Alt_no_PCR_reads>=2,
UMI_ref_reads==0,
!UMI %in% c("26257-26283")) #,
#UMI_Alt_no_PCR_reads==1
head(Filter)
plot<-as.data.frame(Filter) %>% group_by(SNP) %>% dplyr::summarise(count=n())
max(plot$count)
pdf("Control_Cutoff_C1and2.pdf",width = 3,height = 3)
plot$SNP<-factor(plot$SNP,levels = c("C > T","G > A","A > G","T > C","G > T","C > A","G > C","C > G","A > C","T > G","A > T","T > A"))
ggplot(data=plot, aes(x=factor(SNP),y=count,fill=SNP)) +
geom_bar(position="dodge", stat="identity",color="black",width = 0.8)+
scale_fill_manual(values = Self1)+
labs(x='',y='Mutation count')+ my_theme
#my_theme
dev.off()
#FigS2C
Filter<-filter(Ctrl,
!Pos %in% c(4402,5062,8782,28144),
Dis >= 15,
UMI_Alt_no_PCR_reads>=2,
UMI_ref_reads==0,
!UMI %in% c("26257-26283")) #,
#UMI_Alt_no_PCR_reads==1
head(Filter)
plot<-as.data.frame(Filter) %>% group_by(SNP) %>% dplyr::summarise(count=n())
max(plot$count)
pdf("Control_Cutoff_C1and2and3.pdf",width = 3,height = 3)
plot$SNP<-factor(plot$SNP,levels = c("C > T","G > A","A > G","T > C","G > T","C > A","G > C","C > G","A > C","T > G","A > T","T > A"))
ggplot(data=plot, aes(x=factor(SNP),y=count,fill=SNP)) +
geom_bar(position="dodge", stat="identity",color="black",width = 0.8)+
scale_fill_manual(values = Self1)+scale_y_continuous(limits = c(0,250))+
labs(x='',y='Mutation count')+ my_theme
#my_theme
dev.off()
###########################################################
#To discard potential polymorphisms
#cut-off 0.2%
###########################################################
#FigS2D
#cut-off 0.2%
Gaussi<-read.table("/Dell/Dell13/shankj/projects/Cov/Plot/20210414/Fig1/Guassi.txt",sep = "\t",header = F)
head(Gaussi)
colnames(Gaussi)<-c("Pos","Mismatch_Number","Total_Number")
Gaussi$Frac<-Gaussi$Mismatch_Number/Gaussi$Total_Number
ggplot(Gaussi) +
#geom_vline(xintercept = log2(20),color="red",linetype="dotted")+
labs(x='log10(mismatch read number/total read number)')+
#scale_x_continuous(limits = c(0,0.01)) +
geom_density(aes(x =log10(Frac), y =..count..))
Gaussi<-filter(Gaussi, Frac<0.9,Frac>0)
library(mixtools)
mid<-mixtools::normalmixEM(log10(Gaussi$Frac), arbvar = T, epsilon = 1e-03)
mid$lambda
mid$mu
mid$sigma
pnorm(mean = -2.423157,sd=0.5649736,lower.tail = T,q=log10(0.002))*0.06368319
pnorm(mean = -3.044717 ,sd=0.2420202,lower.tail = T,q=log10(0.002))*0.93631681
0.01991428/(0.01991428+0.8646311)
ggplot(Gaussi) +
labs(x='log10(mismatch read number/total read number)')+
geom_density(aes(x =log10(Frac)))+my_theme
pdf("/Dell/Dell13/shankj/projects/Cov/Plot/20210609/FigS3_E-2.pdf",width = 3,height = 3)
data.frame(x = mid$x) %>%
ggplot() +
geom_histogram(aes(x, y =..density..), binwidth = .2, colour = "grey30", fill = "grey", lwd = .5) +
stat_function(geom = "line", fun = plot_mix_comps, # here is the function
args = list(mid$mu[1], mid$sigma[1], lam = mid$lambda[1]),
colour = "red", lwd = 1) +
stat_function(geom = "line", fun = plot_mix_comps, # and here again because k = 2
args = list(mid$mu[2], mid$sigma[2], lam = mid$lambda[2]),
colour = "blue", lwd = 1) +
labs(x='log10(mismatch read number/total read number)',y="Density")+ my_theme+#2+guides(fill=F)+
geom_vline(xintercept=log10(0.002),col="black",linetype="dashed",lwd=1)
dev.off()
head(Ctrl)
###########################################################
#De novo mutations in Vero
#Fig1F
###########################################################
Filter<-filter(Ctrl,
!Pos %in% c(4402,5062,8782,28144),
Dis >= 15,
UMI_Alt_no_PCR_reads>=2,
UMI_ref_reads==0,
All_Alt_reads/Total_Number <= 0.002,
!UMI %in% c("26257-26283")) #,
#UMI_Alt_no_PCR_reads==1
head(Filter)
CT_Pos<-filter(Filter,SNP=="C > T")$Pos
#mutation spectrum
plot<-as.data.frame(Filter) %>% group_by(SNP) %>% dplyr::summarise(count=n())
sum(plot$count)
pdf("Fig1F.pdf",width = 3,height = 3)
plot$SNP<-factor(plot$SNP,levels = c("C > T","G > A","A > G","T > C","G > T","C > A","G > C","C > G","A > C","T > G","A > T","T > A"))
ggplot(data=plot, aes(x=factor(SNP),y=count,fill=SNP)) +
geom_bar(position="dodge", stat="identity",color="black",width = 0.8)+
scale_fill_manual(values = Self1)+
labs(x='',y='Mutation count')+ my_theme
#my_theme
dev.off()
#Count
ts<-sum(51,15,35,21)
tv<-sum(8,4,6,6,9,24,11,7)
ts+tv
binom.test(ts,(ts+tv),4/12)
binom.test(24,tv,1/8)
#Freq
#Coverage_consensus_reads.txt
#Base count in Consensus reads
#A 3631127
#T 3212571
#C 2354397
#G 2175628
ts<-c(9.65E-06,2.17E-05,6.90E-06,6.55E-06)
tv<-c(2.21E-06,1.10E-06,2.55E-06,2.55E-06,4.14E-06,1.10E-05,3.43E-06,2.18E-06)
t.test(log10(ts),log10(tv))
wilcox.test(c(9.65E-06,2.17E-05,6.90E-06,6.55E-06),
c(2.21E-06,1.10E-06,2.55E-06,2.55E-06,4.14E-06,1.10E-05,3.43E-06,2.18E-06))
t.test(c(2.21E-06,1.10E-06,2.55E-06,2.55E-06,4.14E-06,1.10E-05,3.43E-06,2.18E-06), mu = 1.10E-05)
#C>U VS G>A
fisher.test(matrix(c(51,15,2354397,2185628),nrow = 2))
# G>U VS C>A
fisher.test(matrix(c(24,6,2185628,2354397),nrow = 2))
#Fisher exact test, only use the sites covered by junction read pairs
fisher.test(matrix(c(22,6,(5863-22),(5492-6)),nrow=2))
#Potential_mutation_site.txt
# Var1 Freq
# A 8929
# C 5492
# G 5863
# T 9594
###########################################################
#Consenus VS Unconsensus
#FigS3A
###########################################################
#mismatch frequency
#Consensuse
Consensus<-as.data.frame(fread("/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/ConsensusVSunconsensus/Consensus.result2",stringsAsFactors = F,header = F))
colnames(Consensus)<-c("Aver","SNP","Cover","SNP_Freq")
head(Consensus)
##Inconsensuse
Inconsensuse<-as.data.frame(fread("/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/ConsensusVSunconsensus/Unconsensus.result2",stringsAsFactors = F,header = F))
colnames(Inconsensuse)<-c("BQ","SNP","Cover","SNP_Freq")
head(Inconsensuse)
pdf("ConsensusVSInconsensue_Mismatch_Freq.pdf",height = 3,width = 3,useDingbats = F)
ggplot()+
geom_point(data=Consensus,aes(x=Aver,y=log10(SNP_Freq)),col="black")+
geom_point(data=Inconsensuse,aes(x=BQ,y=log10(SNP_Freq)),col="red")+
scale_y_continuous(breaks = seq(-5,0,1),limits = c(-5,0))+
my_theme
dev.off()
###################################################################
#Small indel
#FigS3D-E
###################################################################
# Per-UMI indel calls; columns are defined by the upstream JCR pipeline.
test<-read.table("/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/Vero_JCR_default/JCR_indel/Polymorphism_Consensuse.Indel.txt",header = F,sep="\t")
colnames(test)<-c("Fraction","Barcode","Barcode_All_Read_pair_number","Covered_reads_number","Covered_readpair_number","Pos","Indel","Alt","Indel_len","Dis","Reads_Pos","Reads","Reads_PCR","non_PCR_number","SNP_read_number")
# Keep calls below the 0.2% polymorphism cut-off and >=15 nt from a junction.
Filter<-filter(test,Fraction<=0.002,abs(Dis)>=15)
# Collapse to unique indel events (one row per Pos/Indel/Alt/length).
# BUG FIX: Count must be computed BEFORE it is written out; the original
# called write.csv(Count, ...) one line before Count was created, which
# errors (or silently writes a stale object from a previous run).
Count<-Filter %>% group_by(Pos,Indel,Alt,Indel_len) %>% dplyr::summarise(count=n())
write.csv(Count,"/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/Vero_JCR_default/JCR_indel/Indel.csv",row.names = F,quote = F)
nrow(Count)
# FigS3D: length distribution of singleton indels.
# NOTE(review): my_theme1/my_theme2 are not defined in this file -- assumed
# defined earlier in the session; confirm.
ggplot()+
geom_histogram(data=filter(Count,count==1),aes(x=Indel_len),binwidth=1)+
labs(x='Indel length(Insertion:+ \t Deletion:-)')+
my_theme1
#Distance
# FigS3E: distance of indel calls to the nearest junction site.
ggplot()+
geom_histogram(data=Filter,aes(x=Dis),binwidth=1)+
scale_x_continuous(limits = c(-260,170)) +
labs(x='Distance to junction site')+#my_theme1
my_theme2+guides(fill=F)
#read distance
as.vector(test$Reads_Pos)
# Flatten the comma-separated read-position strings into one numeric column;
# map()/str_split() come from purrr/stringr (assumed attached).
mid<-as.data.frame(na.omit(as.numeric(unlist(map(test$Reads_Pos,~str_split(.,','))))))
colnames(mid)<-"ReadPosition"
ggplot()+
geom_histogram(data=mid,aes(x=ReadPosition),binwidth=1)+
#scale_x_continuous(limits = c(0,220)) +
labs(x='Read Position')+my_theme
###################################################################
#A549
###################################################################
tmp <- list.files(path = "/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/Read2/",pattern = "*.UMI.mutation.countPvalue.csv")
tmp2 <- list.files(path = "/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/Read2/",pattern = "*.mismatch")
Filter<-data.frame()
for (i in 1:3) {
Mismatch<-read.table(paste0("/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/Read2/",tmp2[i]),header = T,stringsAsFactors = F)
colnames(Mismatch)<-c("Pos","Mismatch_Number","Total_Number")
Mutation<-read.csv(paste0("/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/Read2/",tmp[i]),header = T,stringsAsFactors = F)
name=sub(".UMI.mutation.countPvalue.csv","",tmp[i])
#Mutation$Qvalue<-qvalue(Mutation$Pvalue)$qvalue
Mid<-merge(Mutation,Mismatch,by=c("Pos"))
Mid$sample<-rep(name,nrow(Mid))
Filter<-rbind(Filter,Mid)
rm(Mid)
rm(Mutation)
}
#Filter$Qvalue<-qvalue(Filter$Pvalue)$qvalue
head(Filter)
nrow(Filter)
write.csv(Filter,"A549.csv",row.names = F,quote = F)
#C8782T,T28144C,C18060T
# Apply the consensus cut-offs and drop the three known strain polymorphisms
# listed above.
Mid<-filter(Filter,Dis >= 15,
UMI_Alt_no_PCR_reads>=2,
UMI_ref_reads==0,
!Pos %in% c(18060,8782,28144)) #Dis >= 15, Count <=5,UMI_Alt_Fraction==1
# BUG FIX: head(Mid) previously ran BEFORE this filter, at a point where Mid
# had just been rm()'d inside the loop above, raising
# "object 'Mid' not found". Inspect it after it is (re)created.
head(Mid)
# NOTE(review): output filename has no .csv extension -- confirm intended.
write.csv(Mid,"A549_Filter",row.names = F,quote = F)
plot<-as.data.frame(Mid %>% group_by(SNP) %>% dplyr::summarise(count=n()))
sum(plot$count)
# Enrichment tests (counts hard-coded from the spectrum -- TODO confirm
# against the current plot data).
binom.test(29,sum(5+12+11+1+4+29+11+10),1/8)
binom.test(65,sum(65+14+5+8),1/4)
pdf("A549_1.pdf",width = 3,height = 3)
plot$SNP<-factor(plot$SNP,levels = c("C > T","G > A","A > G","T > C","G > T","C > A","G > C","C > G","A > C","T > G","A > T","T > A"))
ggplot(data=plot, aes(x=factor(SNP),y=count,fill=SNP)) +
geom_bar(position="dodge", stat="identity",color="black",width = 0.8)+
scale_fill_manual(values = Self1)+scale_y_continuous(limits = c(0,80)) +
labs(x='',y='Mutation count')+ my_theme2+guides(fill=F)
dev.off()
# A 8637
# C 5338
# G 5677
# T 9253
fisher.test(matrix(c(29,11,(5677-29),(5338-11)),nrow=2))
#mutation rate
#C 260205+283168+273443
#A 378420+410749+394759
#G 239705+262490+254775
#T 293026+316413+303995
#
fisher.test(matrix(c(29,11,(239705+262490+254775),(260205+283168+273443)),nrow=2))
fisher.test(matrix(c(65,5,(260205+283168+273443),(239705+262490+254775)),nrow=2))
Mutation<-read.table("/Dell/Dell13/shankj/projects/Cov/Plot/20210414/Fig1/A549.txt",header = T,sep="\t")
head(Mutation)
Mutation$SNP<-factor(Mutation$SNP,levels = c("C > T","G > A","A > G","T > C","G > T","C > A","G > C","C > G","A > C","T > G","A > T","T > A"))
pdf("/Dell/Dell13/shankj/projects/Cov/Plot/20210519/SciAdv_VS_Vero_rate.pdf",height = 3,width = 3,useDingbats = F)
ggplot(data=Mutation ,aes(x=SCV2_count,y=SciAdv,col=SNP,size=3) )+ #y=log10(as.numeric(Error_Fraction)+10^(-11))
geom_point()+scale_y_continuous(limits = c(0,300)) +scale_x_continuous(limits = c(0,52)) +
scale_color_manual(values = Self1)+
labs(x='SARS-CoV-2 mutation rate in Vero',y='SARS-CoV-2 mutation from Sci. Adv.') +
my_theme2+guides(col=F,size=F)
dev.off()
cor.test(Mutation$SCV2_count,Mutation$SciAdv,method = "s")
ggplot(data=Mutation,aes(x=A549,y=SARS_CoV2_Polymorphism,col=SNP,size=3) )+ #y=log10(as.numeric(Error_Fraction)+10^(-11))
geom_point()+#scale_y_continuous(limits = c(-6,-4)) +scale_x_continuous(limits = c(-6,-4.5)) +
scale_color_manual(values = Self1)+
labs(x='# of SARS-CoV-2 mutation rate from A549',y='# of SARS-CoV-2 polymorphisms') +
my_theme2+guides(col=F,size=F)
# --- A549 vs. Vero comparison of SARS-CoV-2 mutation measurements -------------
# `Mutation` is assumed to hold one row per SNP class with per-cell-line mutation
# counts (SCV2_count, A549) and rates (SCV2_Rate, A549_Rate) -- defined upstream.

# Correlation of log10 mutation counts between Vero (SCV2_count) and A549.
cor.test(log10(Mutation$SCV2_count),log10(Mutation$A549))
# Scatter plot of mutation rates, Vero (x) vs. A549 (y), coloured by SNP class.
# Drawn on the active device only (no pdf() wrapper around this plot).
ggplot(data=Mutation ,aes(x=SCV2_Rate,y=A549_Rate,col=SNP,size=3) )+ #y=log10(as.numeric(Error_Fraction)+10^(-11))
geom_point()+
scale_color_manual(values = Self1)+
scale_x_continuous(limits = c(0,2.5*10^-5)) +
labs(x='log10(SARS-CoV-2 mutation rate from Vero)',y='log10(SARS-CoV-2 mutation rate from A549)') +
my_theme2+guides(col=F,size=F)
# Correlation of log10 mutation rates between the two cell lines.
cor.test(log10(Mutation$SCV2_Rate),log10(Mutation$A549_Rate))
# Mutation-count scatter plot written to a 3x3-inch PDF (useDingbats = F avoids
# the Dingbats font for point glyphs, which keeps the PDF editable downstream).
pdf("/Dell/Dell13/shankj/projects/Cov/Plot/20210519/A549_VS_Vero_count.pdf",height = 3,width = 3,useDingbats = F)
ggplot(data=Mutation ,aes(x=SCV2_count,y=A549,col=SNP,size=3) )+ #y=log10(as.numeric(Error_Fraction)+10^(-11))
geom_point()+scale_y_continuous(limits = c(0,80)) +scale_x_continuous(limits = c(0,52)) +
scale_color_manual(values = Self1)+
labs(x='SARS-CoV-2 mutation count from Vero',y='SARS-CoV-2 mutation count from A549') +
my_theme2+guides(col=F,size=F)
dev.off()
cor.test(log10(Mutation$SCV2_count),log10(Mutation$A549))
# Bar plot of A549 mutation rates per SNP class (rates rescaled by 1e5 for the axis).
pdf("/Dell/Dell13/shankj/projects/Cov/Plot/20210519/A549_rate.pdf",height = 3,width = 3,useDingbats = F)
ggplot(data=Mutation, aes(x=factor(SNP),y=A549_Rate*10^5,fill=SNP)) +
geom_bar(position="dodge", stat="identity",color="black",width = 0.8)+
scale_fill_manual(values = Self1)+#scale_y_continuous(limits = c(0,25)) +
labs(x='',y='Mutation rate (x10^-5)')+
my_theme2+guides(fill=F)
dev.off()
# One-sample t-tests of the listed per-sample rates against a fixed mu.
# NOTE(review): the mu values look like reference rates from another data set --
# confirm their provenance before reuse.
t.test(c(4.22323e-06,1.01358e-05,1.34669e-05,1.22427e-06,1.20425e-05,1.09477e-05), mu = 3.83106e-05)
t.test(c(1.18250e-05,6.60528e-06,8.75816e-06), mu = 7.95773e-05)
# Fig 2D-1: Vero mutation counts vs. SARS-CoV-2 polymorphism (log10 / log10).
pdf("/Dell/Dell13/shankj/projects/Cov/Plot/20210712/20210716/Fig2D-1.pdf",height = 3,width = 3,useDingbats = F)
ggplot(data=Mutation ,aes(x=log10(SCV2_count),y=log10(SARS_CoV2_Polymorphism),col=SNP,size=3) )+ #y=log10(as.numeric(Error_Fraction)+10^(-11))
geom_point()+scale_y_continuous(limits = c(1,3.5)) +scale_x_continuous(limits = c(0.5,2)) +
scale_color_manual(values = Self1)+
labs(x='SARS-CoV-2 mutation in Vero',y='SARS-CoV-2 mutation polymorphism')# +
#my_theme2+guides(col=F,size=F)
dev.off()
cor.test(log10(Mutation$SCV2_count),log10(Mutation$SARS_CoV2_Polymorphism))
############################################################################################
#Fig1C
############################################################################################
# Per-position mismatch profile around junction sites, built from consensus reads.
# Input columns (renamed below): distance to junction (Dis), mismatch count (Mis),
# coverage (Cover).
Junction<-as.data.frame(fread("/Dell/Dell13/shankj/projects/Cov/Plot/20210609/Junction_Site_mismatch_consensus_reads.txt",stringsAsFactors = F))
pdf("/Dell/Dell13/shankj/projects/Cov/Plot/20210609/Consensus_Mismatch.pdf",height = 3,width = 3,useDingbats = F)
colnames(Junction)<-c("Dis","Mis","Cover")
# Bar plot of the mismatch *fraction* (Mis/Cover) within +/-30 nt of the junction.
# NOTE(review): the y-axis label says "Mismatch number" but the plotted value is a
# fraction (Mis/Cover, y-limit 0.025) -- confirm which label is intended.
ggplot(data=Junction, aes(x=(Dis),y=Mis/Cover)) + #geom_abline(intercept=0.5, slope=0,color="red")+
geom_bar(position="dodge", stat="identity",width = 0.5)+
#theme_classic()+theme(panel.background=element_rect(colour='black'))+
labs(x='Position(Junction site=0) ',y='Mismatch number')+#barplot_theme +
#theme(axis.text.x = element_text(angle = 45, hjust = 0.5, vjust = 0.5))+
scale_x_continuous(breaks = seq(-30,30,15),limits = c(-30,30))+
scale_y_continuous(limits = c(0,0.025))+my_theme2
dev.off()
###############################################################################################
#FigS1C
###############################################################################################
# Splice-junction table; the column names below match STAR's SJ.out.tab layout.
Junction<-read.table("/Dell/Dell13/shankj/projects/Cov/SCV2SJ.out.tab",sep="\t",header = F,stringsAsFactors = F)
head(Junction)
colnames(Junction)<-c("Chr","start","end","strand","intron_motif","annotated","uniquely_mapped","multiple_mapped","Overhang")
#Junction2<-separate(Junction,UMI,into = c('start','end'),sep = '-')[,1:3]
# Total read support per junction = unique + multi-mapped reads (becomes column 10).
Junction$sum<-Junction$uniquely_mapped+Junction$multiple_mapped
# NOTE(review): filter() with no predicate keeps every row, so this just prints
# nrow(Junction) -- a filtering condition may be missing here.
nrow(filter(Junction))
# Cap the read-support values (column 10 = "sum") at 2^5 so a few very deep
# junctions do not dominate the log2 colour scale of the plot below.
cutoff <- 2^5
# Vectorised replacement for the original row-wise for loop: pmin() caps every
# value above `cutoff` at `cutoff` and leaves the rest unchanged -- identical to
# the strict `>` test in the loop, and it avoids c(1:nrow(...)), which would
# iterate over c(1, 0) if the table ever had zero rows.
Junction[, 10] <- pmin(Junction[, 10], cutoff)
# Three graphics devices are opened in a row but only one dev.off() follows, so
# the plot lands on the last device opened (the no-legend JPEG) and the first
# two stay open -- presumably interactive toggling between output formats;
# comment out the unused pdf()/jpeg() calls if this is ever run as a script.
pdf("FigS1C.pdf",height = 4,width = 5.5)
jpeg("FigS1C.jpg",units = "cm",height = 8, width = 11,res = 300)
jpeg("/Dell/Dell13/shankj/projects/Cov/Plot/20210702/FigS1C_nolegend.jpg",units = "cm",height = 8, width = 8,res = 300)
# Junction map: every junction is drawn at (start, -end) and coloured by log2 of
# its (capped) read support; the 3108-28158 junction is highlighted in red.
ggplot() +
geom_point(data=Junction, aes(x=as.numeric(start),y=-as.numeric(end),color=log2(sum)),shape=15,size=0.2)+
geom_point(data=filter(Junction,start==3108,end==28158),aes(x=as.numeric(start),y=-as.numeric(end)),color="red",size=2,shape=23)+
scale_color_continuous(type = "viridis")+my_theme2+
ylab("End")+xlab("Start")+guides(col=F)
dev.off()
# Support distribution: count how many junctions share each `sum` value, then
# total the junctions whose support level is shared by at most 20 junctions.
mid<-Junction %>% group_by(sum) %>% dplyr::summarise(count=n())
sum(filter(mid,count<=20)$count)
|
##' Starts GUI
##'
##' Start a web-based Graphical User Interface (GUI) to perform some analyses that the rhr package provides.
##'
##' The app is served from the \code{gui} directory shipped with the installed
##' \code{rhr} package (located via \code{system.file});
##' \code{shiny::runApp()} blocks the R session until the app is closed.
##' @title rhrGUI
##' @return No return value; called for the side effect of launching the GUI.
##' @export
rhrGUI <- function() {
shiny::runApp(system.file('gui', package='rhr'))
}
| /R/rhrGUI.R | no_license | jmsigner/rhr | R | false | false | 240 | r | ##' Starts GUI
##'
##' Start a web-based Graphical User Interface (GUI) to perform some analyses that the rhr package provides.
##' @title rhrGUI
##' @export
rhrGUI <- function() {
shiny::runApp(system.file('gui', package='rhr'))
}
|
with(a84621d596bbd47ab9fecf15f5a2b2595, {ROOT <- 'D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/75a73e73-db77-4861-bc03-c0f1aeb0de8b';rm(list=ls())}); | /75a73e73-db77-4861-bc03-c0f1aeb0de8b/R/Temp/aATvheaBlGQ9z.R | no_license | ayanmanna8/test | R | false | false | 212 | r | with(a84621d596bbd47ab9fecf15f5a2b2595, {ROOT <- 'D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/75a73e73-db77-4861-bc03-c0f1aeb0de8b';rm(list=ls())}); |
# KRUSKAL-WALLIS TEST :
Group_native <- c(8,5,7,11,9,6)
Group_water <- c(10,12,11,9,13,12)
Group_fertilizer <- c(11,14,10,16,17,12)
Group_water_fertilizer <- c(18,20,16,15,14,22)
Group <-data.frame(Group_native,Group_water,Group_fertilizer,Group_water_fertilizer)
Group
# alpha = .01, critical value :
qchisq(.99,df=3)
native<- Group$Group_native
water<- Group$Group_water
fertilizer<- Group$Group_fertilizer
water_fertilizer<- Group$Group_water_fertilizer
x1<-c(native,water,fertilizer,water_fertilizer)
x1
g<- factor(rep(1:4, c(6,6,6,6)),
labels = c("native",
"water",
"fertilizer",
"water_fertilizer"))
kruskal.test(x1, g)
# The observed K value is 16.77 and the critical is 11.3449.
# Because the observed value is greater than the table value, the null hypothesis
# is rejected. There is a signi???cant difference in the way the trees grow | /Business_Statistics_For_Contemporary_Decision_Making_by_Ken_Black/CH17/EX17.4/Ex17_4.R | permissive | FOSSEE/R_TBC_Uploads | R | false | false | 958 | r | # KRUSKAL-WALLIS TEST :
Group_native <- c(8,5,7,11,9,6)
Group_water <- c(10,12,11,9,13,12)
Group_fertilizer <- c(11,14,10,16,17,12)
Group_water_fertilizer <- c(18,20,16,15,14,22)
Group <-data.frame(Group_native,Group_water,Group_fertilizer,Group_water_fertilizer)
Group
# alpha = .01, critical value :
qchisq(.99,df=3)
native<- Group$Group_native
water<- Group$Group_water
fertilizer<- Group$Group_fertilizer
water_fertilizer<- Group$Group_water_fertilizer
x1<-c(native,water,fertilizer,water_fertilizer)
x1
g<- factor(rep(1:4, c(6,6,6,6)),
labels = c("native",
"water",
"fertilizer",
"water_fertilizer"))
kruskal.test(x1, g)
# The observed K value is 16.77 and the critical is 11.3449.
# Because the observed value is greater than the table value, the null hypothesis
# is rejected. There is a significant difference in the way the trees grow
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_plots.R
\name{mplot_density}
\alias{mplot_density}
\title{Density plot for discrete and continuous values}
\usage{
mplot_density(
tag,
score,
thresh = 6,
model_name = NA,
subtitle = NA,
save = FALSE,
subdir = NA,
file_name = "viz_distribution.png"
)
}
\arguments{
\item{tag}{Vector. Real known label}
\item{score}{Vector. Predicted value or model's result}
\item{thresh}{Integer. Threshold for selecting binary or regression
models: this number is the threshold of unique values we should
have in 'tag' (more than: regression; less than: classification)}
\item{model_name}{Character. Model's name}
\item{subtitle}{Character. Subtitle to show in plot}
\item{save}{Boolean. Save output plot into working directory}
\item{subdir}{Character. Sub directory on which you wish to save the plot}
\item{file_name}{Character. File name as you wish to save the plot}
}
\value{
Plot with distribution and performance results.
}
\description{
This function plots discrete and continuous values results
}
\examples{
Sys.unsetenv("LARES_FONT") # Temporal
data(dfr) # Results for AutoML Predictions
lapply(dfr[c(1, 3)], head)
# Plot for binomial results
mplot_density(dfr$class2$tag, dfr$class2$scores, subtitle = "Titanic Survived Model")
# Plot for regression results
mplot_density(dfr$regr$tag, dfr$regr$score, model_name = "Titanic Fare Model")
}
\seealso{
Other ML Visualization:
\code{\link{mplot_conf}()},
\code{\link{mplot_cuts_error}()},
\code{\link{mplot_cuts}()},
\code{\link{mplot_full}()},
\code{\link{mplot_gain}()},
\code{\link{mplot_importance}()},
\code{\link{mplot_lineal}()},
\code{\link{mplot_metrics}()},
\code{\link{mplot_response}()},
\code{\link{mplot_roc}()},
\code{\link{mplot_splits}()},
\code{\link{mplot_topcats}()}
}
\concept{ML Visualization}
| /man/mplot_density.Rd | no_license | laresbernardo/lares | R | false | true | 1,868 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_plots.R
\name{mplot_density}
\alias{mplot_density}
\title{Density plot for discrete and continuous values}
\usage{
mplot_density(
tag,
score,
thresh = 6,
model_name = NA,
subtitle = NA,
save = FALSE,
subdir = NA,
file_name = "viz_distribution.png"
)
}
\arguments{
\item{tag}{Vector. Real known label}
\item{score}{Vector. Predicted value or model's result}
\item{thresh}{Integer. Threshold for selecting binary or regression
models: this number is the threshold of unique values we should
have in 'tag' (more than: regression; less than: classification)}
\item{model_name}{Character. Model's name}
\item{subtitle}{Character. Subtitle to show in plot}
\item{save}{Boolean. Save output plot into working directory}
\item{subdir}{Character. Sub directory on which you wish to save the plot}
\item{file_name}{Character. File name as you wish to save the plot}
}
\value{
Plot with distribution and performance results.
}
\description{
This function plots discrete and continuous values results
}
\examples{
Sys.unsetenv("LARES_FONT") # Temporal
data(dfr) # Results for AutoML Predictions
lapply(dfr[c(1, 3)], head)
# Plot for binomial results
mplot_density(dfr$class2$tag, dfr$class2$scores, subtitle = "Titanic Survived Model")
# Plot for regression results
mplot_density(dfr$regr$tag, dfr$regr$score, model_name = "Titanic Fare Model")
}
\seealso{
Other ML Visualization:
\code{\link{mplot_conf}()},
\code{\link{mplot_cuts_error}()},
\code{\link{mplot_cuts}()},
\code{\link{mplot_full}()},
\code{\link{mplot_gain}()},
\code{\link{mplot_importance}()},
\code{\link{mplot_lineal}()},
\code{\link{mplot_metrics}()},
\code{\link{mplot_response}()},
\code{\link{mplot_roc}()},
\code{\link{mplot_splits}()},
\code{\link{mplot_topcats}()}
}
\concept{ML Visualization}
|
# Plot 3: energy sub-metering traces for 1-2 February 2007.
# Read the household power consumption data; "?" marks missing values.
file <- read.table("household_power_consumption.txt", header = TRUE, sep=";", stringsAsFactors=F, na.strings="?")
# Keep only the two target days (dates are stored as d/m/Y strings).
twoday <- file[file$Date %in% c("1/2/2007","2/2/2007"),]
# Combine date and time columns into timestamps for the x-axis.
time <- strptime(paste(twoday$Date, twoday$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
#Plot3
Sub1 <- as.numeric(twoday$Sub_metering_1)
Sub2 <- as.numeric(twoday$Sub_metering_2)
Sub3 <- as.numeric(twoday$Sub_metering_3)
# One line per sub-meter on a 480x480 PNG, with a colour legend top-right.
png("plot3.png", width=480, height=480)
plot(time, Sub1,type="l", xlab=" ", ylab="Energy sub metering")
lines(time, Sub2, col="red")
lines(time, Sub3, col="blue")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col=c("black", "red", "blue"), lwd=par("lwd"))
dev.off()
| /Plot3.R | no_license | ggggia/ExData_Plotting1 | R | false | false | 695 | r | file <- read.table("household_power_consumption.txt", header = TRUE, sep=";", stringsAsFactors=F, na.strings="?")
twoday <- file[file$Date %in% c("1/2/2007","2/2/2007"),]
time <- strptime(paste(twoday$Date, twoday$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
#Plot3
Sub1 <- as.numeric(twoday$Sub_metering_1)
Sub2 <- as.numeric(twoday$Sub_metering_2)
Sub3 <- as.numeric(twoday$Sub_metering_3)
png("plot3.png", width=480, height=480)
plot(time, Sub1,type="l", xlab=" ", ylab="Energy sub metering")
lines(time, Sub2, col="red")
lines(time, Sub3, col="blue")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col=c("black", "red", "blue"), lwd=par("lwd"))
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trial_image_slider_response.R
\name{trial_image_slider_response}
\alias{trial_image_slider_response}
\title{Specify an image trial with slider bar response}
\usage{
trial_image_slider_response(
stimulus,
stimulus_height = NULL,
stimulus_width = NULL,
maintain_aspect_ratio = TRUE,
labels = c("0\%", "25\%", "50\%", "75\%", "100\%"),
button_label = "Continue",
min = 0,
max = 100,
start = 50,
step = 1,
slider_width = NULL,
require_movement = FALSE,
prompt = NULL,
stimulus_duration = NULL,
trial_duration = NULL,
response_ends_trial = TRUE,
post_trial_gap = 0,
on_finish = NULL,
on_load = NULL,
data = NULL
)
}
\arguments{
\item{stimulus}{The path of the image file to be displayed.}
\item{stimulus_height}{Set the height of the image in pixels. If NULL, then the image will display at its natural height.}
\item{stimulus_width}{Set the width of the image in pixels. If NULL, then the image will display at its natural width.}
\item{maintain_aspect_ratio}{If setting only the width or only the height and this parameter is TRUE, then the other dimension will be scaled to maintain the image's aspect ratio.}
\item{labels}{Labels displayed at equidistant locations on the slider.}
\item{button_label}{Label placed on the "continue" button}
\item{min}{Minimum value of the slider}
\item{max}{Maximum value of the slider}
\item{start}{Initial value of the slider}
\item{step}{Step size of the slider}
\item{slider_width}{Horizontal width of the slider (defaults to display width)}
\item{require_movement}{Does the user need to move the slider before clicking the continue button?}
\item{prompt}{A string (may contain HTML) that will be displayed below the stimulus, intended as a reminder about the actions to take (e.g., which key to press).}
\item{stimulus_duration}{How long to show the stimulus, in milliseconds. If NULL, then the stimulus will be shown until the subject makes a response}
\item{trial_duration}{How long to wait for a response before ending trial in milliseconds. If NULL, the trial will wait indefinitely. If no response is made before the deadline is reached, the response will be recorded as NULL.}
\item{response_ends_trial}{If TRUE, then the trial will end when a response is made (or the trial_duration expires). If FALSE, the trial continues until the deadline expires.}
\item{post_trial_gap}{The gap in milliseconds between the current trial and the next trial. If NULL, there will be no gap.}
\item{on_finish}{A javascript callback function to execute when the trial finishes}
\item{on_load}{A javascript callback function to execute when the trial begins, before any loading has occurred}
\item{data}{An object containing additional data to store for the trial}
}
\value{
Functions with a \code{trial_} prefix always return a "trial" object.
A trial object is simply a list containing the input arguments, with
\code{NULL} elements removed. Logical values in the input (\code{TRUE} and
\code{FALSE}) are transformed to character vectors \code{"true"} and \code{"false"}
and are specified to be objects of class "json", ensuring that they will be
written to file as the javascript logicals, \code{true} and \code{false}.
}
\description{
The \code{trial_image_slider_response} function is used to display
an image stimulus and collect a response using a slider bar.
}
\details{
The \code{trial_image_slider_response} function belongs to the "stimulus-response"
family of trials, all of which display a stimulus of a particular type (image,
audio, video or HTML) and collect responses using a particular mechanism
(button, keyboard or slider).
This one displays an image and records responses generated with a slider.
\subsection{Stimulus display}{
For trials that display an image, the \code{stimulus} argument is a string that
specifies the path to the image file. More precisely, it must specify the path to
where the image file will be located at the time the experiment runs. Typically,
if an experiment is deployed using the \code{\link{build_experiment}()} function
all resource files will be stored in a "resource" folder, and the images will
be copied to the "image" subfolder. So if the image to be displayed is a file
called "picture.png", the \code{stimulus} path on a Mac or Linux machine would
likely be "resource/image/picture.png". Note that this path is specified relative
to the location of the primary experiment file "image.html". To make this a little
easier, the \code{\link{insert_resource}()} function can be used to construct
resource paths automatically. In the example above,
\code{stimulus = insert_resource("picture.png")} would suffice.
Other aspects to the stimulus display can be controlled with other arguments.
The \code{stimulus_height} and \code{stimulus_width} arguments can be used to
manually control the image display size by specifying the height/width in
pixels. If only one of these two arguments is specified, but the
\code{maintain_aspect_ratio} value is set to \code{TRUE}, the other dimension
of the image will automatically be scaled appropriately.
The length of time that the image remains on screen can also be customised
by setting the \code{stimulus_duration} argument: this should be a numeric
value indicating the number of milliseconds before the image disappears.
Alternatively, a value of \code{NULL} (the default) ensures that the image
remains visible until the trial ends.
}
\subsection{Response mechanism}{
Participant responses for this trial type are collected using a slider bar
that the participant can move using the mouse. Once the participant is happy
with this positioning they can click a button at the bottom of the page to
move on to the next trial. This response method can be customised in several
ways depending on the following arguments:
\itemize{
\item The \code{min} and \code{max} arguments are numeric values that specify the
minimum value (leftmost point on the slider) and the maximum value (rightmost point
on the slider) that a participant can respond with.
\item The \code{start} parameter is a numeric value that indicates where the
slider is initially positioned. By default this is set to the middle of
the scale, but there are many cases where it may be sensible to have the slider
bar start at one end of the scale.
\item The movement of the slider is discretised, and the granularity of this
movement can be customised using the \code{step} parameter. This should be a
numeric value that specifies the smallest possible increment that the participant
can move the slider in either direction.
\item The text labels displayed below the slider bar can also be customised by
specifying the \code{labels} parameter. This argument should be a character vector
that contains the labels to be displayed. Labels will be displayed at equally spaced
intervals along the slider, though it is possible to include blank labels to create
the impression of unequal spacing if that is required.
\item The \code{slider_width} controls the horizontal width of the slider bar:
the default value of \code{NULL} creates a slider that occupies 100\% of the width of
the jsPsych display. Note that this may not be 100\% of the screen width.
\item To ensure that participants do engage with the slider, it is possible to set
\code{require_movement = TRUE} which forces the participant to move the slider
at least once in order to be permitted to move onto the next trial.
\item The \code{button_label} argument specifies the text displayed on the button that
participants click to move to the next trial.
}
}
\subsection{Other behaviour}{
As is the case for most \code{trial_} functions there is a \code{prompt} argument,
a string that specifies additional text that is displayed on screen during the
trial. The value of \code{prompt} can contain HTML markup, allowing it to be
used quite flexibly if needed.
Depending on parameter settings, the trial can end when the subject responds
(\code{response_ends_trial = TRUE}), or after a fixed amount of time
(specified using the \code{trial_duration} argument) has elapsed. The length
of time that the stimulus remains visible can also be customized using the
(\code{stimulus_duration}) argument.
Like all functions in the \code{trial_} family it contains four additional
arguments:
\itemize{
\item The \code{post_trial_gap} argument is a numeric value specifying the
length of the pause between the current trial ending and the next one
beginning. This parameter overrides any default values defined using the
\code{\link{build_experiment}} function, and a blank screen is displayed
during this gap period.
\item The \code{on_load} and \code{on_finish} arguments can be used to
specify javascript functions that will execute before the trial begins or
after it ends. The javascript code can be written manually and inserted *as*
javascript by using the \code{\link{insert_javascript}} function. However,
the \code{fn_} family of functions supplies a variety of functions that may
be useful in many cases.
\item The \code{data} argument can be used to insert custom data values into
the jsPsych data storage for this trial.
}
}
\subsection{Data}{
When this function is called from R it returns the trial object that will
later be inserted into the experiment when \code{\link{build_experiment}}
is called. However, when the trial runs as part of the experiment it returns
values that are recorded in the jsPsych data store and eventually form part
of the data set for the experiment.
The data recorded by this trial is as follows:
\itemize{
\item The \code{rt} value is the response time in milliseconds taken for the
user to make a response. The time is measured from when the stimulus first
appears on the screen until the response.
\item The \code{response} is the numeric value of the slider bar.
\item The \code{stimulus} variable records the path to the image that was
displayed on this trial.
}
In addition, it records default variables that are recorded by all trials:
\itemize{
\item \code{trial_type} is a string that records the name of the plugin used to run the trial.
\item \code{trial_index} is a number that records the index of the current trial across the whole experiment.
\item \code{time_elapsed} counts the number of milliseconds since the start of the experiment when the trial ended.
\item \code{internal_node_id} is a string identifier for the current "node" in the timeline.
}
}
}
\seealso{
Within the "stimulus-response" family of trials, there are four types of
stimuli (image, audio, video and HTML) and three types of response options
(button, keyboard, slider). The corresponding functions are
\code{\link{trial_image_button_response}},
\code{\link{trial_image_keyboard_response}},
\code{\link{trial_image_slider_response}},
\code{\link{trial_audio_button_response}},
\code{\link{trial_audio_keyboard_response}},
\code{\link{trial_audio_slider_response}},
\code{\link{trial_video_button_response}},
\code{\link{trial_video_keyboard_response}},
\code{\link{trial_video_slider_response}},
\code{\link{trial_html_button_response}},
\code{\link{trial_html_keyboard_response}} and
\code{\link{trial_html_slider_response}}.
}
| /man/trial_image_slider_response.Rd | permissive | djnavarro/jaysire | R | false | true | 11,270 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trial_image_slider_response.R
\name{trial_image_slider_response}
\alias{trial_image_slider_response}
\title{Specify an image trial with slider bar response}
\usage{
trial_image_slider_response(
stimulus,
stimulus_height = NULL,
stimulus_width = NULL,
maintain_aspect_ratio = TRUE,
labels = c("0\%", "25\%", "50\%", "75\%", "100\%"),
button_label = "Continue",
min = 0,
max = 100,
start = 50,
step = 1,
slider_width = NULL,
require_movement = FALSE,
prompt = NULL,
stimulus_duration = NULL,
trial_duration = NULL,
response_ends_trial = TRUE,
post_trial_gap = 0,
on_finish = NULL,
on_load = NULL,
data = NULL
)
}
\arguments{
\item{stimulus}{The path of the image file to be displayed.}
\item{stimulus_height}{Set the height of the image in pixels. If NULL, then the image will display at its natural height.}
\item{stimulus_width}{Set the width of the image in pixels. If NULL, then the image will display at its natural width.}
\item{maintain_aspect_ratio}{If setting only the width or only the height and this parameter is TRUE, then the other dimension will be scaled to maintain the image's aspect ratio.}
\item{labels}{Labels displayed at equidistant locations on the slider.}
\item{button_label}{Label placed on the "continue" button}
\item{min}{Minimum value of the slider}
\item{max}{Maximum value of the slider}
\item{start}{Initial value of the slider}
\item{step}{Step size of the slider}
\item{slider_width}{Horizontal width of the slider (defaults to display width)}
\item{require_movement}{Does the user need to move the slider before clicking the continue button?}
\item{prompt}{A string (may contain HTML) that will be displayed below the stimulus, intended as a reminder about the actions to take (e.g., which key to press).}
\item{stimulus_duration}{How long to show the stimulus, in milliseconds. If NULL, then the stimulus will be shown until the subject makes a response}
\item{trial_duration}{How long to wait for a response before ending trial in milliseconds. If NULL, the trial will wait indefinitely. If no response is made before the deadline is reached, the response will be recorded as NULL.}
\item{response_ends_trial}{If TRUE, then the trial will end when a response is made (or the trial_duration expires). If FALSE, the trial continues until the deadline expires.}
\item{post_trial_gap}{The gap in milliseconds between the current trial and the next trial. If NULL, there will be no gap.}
\item{on_finish}{A javascript callback function to execute when the trial finishes}
\item{on_load}{A javascript callback function to execute when the trial begins, before any loading has occurred}
\item{data}{An object containing additional data to store for the trial}
}
\value{
Functions with a \code{trial_} prefix always return a "trial" object.
A trial object is simply a list containing the input arguments, with
\code{NULL} elements removed. Logical values in the input (\code{TRUE} and
\code{FALSE}) are transformed to character vectors \code{"true"} and \code{"false"}
and are specified to be objects of class "json", ensuring that they will be
written to file as the javascript logicals, \code{true} and \code{false}.
}
\description{
The \code{trial_image_slider_response} function is used to display
an image stimulus and collect a response using a slider bar.
}
\details{
The \code{trial_image_slider_response} function belongs to the "stimulus-response"
family of trials, all of which display a stimulus of a particular type (image,
audio, video or HTML) and collect responses using a particular mechanism
(button, keyboard or slider).
This one displays an image and records responses generated with a slider.
\subsection{Stimulus display}{
For trials that display an image, the \code{stimulus} argument is a string that
specifies the path to the image file. More precisely, it must specify the path to
where the image file will be located at the time the experiment runs. Typically,
if an experiment is deployed using the \code{\link{build_experiment}()} function
all resource files will be stored in a "resource" folder, and the images will
be copied to the "image" subfolder. So if the image to be displayed is a file
called "picture.png", the \code{stimulus} path on a Mac or Linux machine would
likely be "resource/image/picture.png". Note that this path is specified relative
to the location of the primary experiment file "image.html". To make this a little
easier, the \code{\link{insert_resource}()} function can be used to construct
resource paths automatically. In the example above,
\code{stimulus = insert_resource("picture.png")} would suffice.
Other aspects to the stimulus display can be controlled with other arguments.
The \code{stimulus_height} and \code{stimulus_width} arguments can be used to
manually control the image display size by specifying the height/width in
pixels. If only one of these two arguments is specified, but the
\code{maintain_aspect_ratio} value is set to \code{TRUE}, the other dimension
of the image will automatically be scaled appropriately.
The length of time that the image remains on screen can also be customised
by setting the \code{stimulus_duration} argument: this should be a numeric
value indicating the number of milliseconds before the image disappears.
Alternatively, a value of \code{NULL} (the default) ensures that the image
remains visible until the trial ends.
}
\subsection{Response mechanism}{
Participant responses for this trial type are collected using a slider bar
that the participant can move using the mouse. Once the participant is happy
with this positioning they can click a button at the bottom of the page to
move on to the next trial. This response method can be customised in several
ways depending on the following arguments:
\itemize{
\item The \code{min} and \code{max} arguments are numeric values that specify the
minimum value (leftmost point on the slider) and the maximum value (rightmost point
on the slider) that a participant can respond with.
\item The \code{start} parameter is a numeric value that indicates where the
slider is initially positioned. By default this is set to the middle of
the scale, but there are many cases where it may be sensible to have the slider
bar start at one end of the scale.
\item The movement of the slider is discretised, and the granularity of this
movement can be customised using the \code{step} parameter. This should be a
numeric value that specifies the smallest possible increment that the participant
can move the slider in either direction.
\item The text labels displayed below the slider bar can also be customised by
specifying the \code{labels} parameter. This argument should be a character vector
that contains the labels to be displaed. Labels will be displayed at equally spaced
intervals along the slider, though it is possible to include blank labels to create
the impression of unequal spacing if that is required.
\item The \code{slider_width} controls the horizontal width of the slider bar:
the default value of \code{NULL} creates a slider that occupies 100\% of the width of
the jsPsych display. Note that this may not be 100\% of the screen width.
\item To ensure that participants do engage with the slider, it is possible to set
\code{require_movement = TRUE} which forces the participant to move the slider
at least once in order to be permitted to move onto the next trial.
\item The \code{button_label} argument specifies the text displayed on the button that
participants click to move to the next trial.
}
}
\subsection{Other behaviour}{
As is the case for most \code{trial_} functions there is a \code{prompt} argument,
a string that specifies additional text that is displayed on screen during the
trial. The value of \code{prompt} can contain HTML markup, allowing it to be
used quite flexibly if needed.
Depending on parameter settings, the trial can end when the subject responds
(\code{response_ends_trial = TRUE}), or after a fixed amount of time
(specified using the \code{trial_duration} argument) has elapsed. The length
of time that the stimulus remains visible can also be customized using the
(\code{stimulus_duration}) argument.
Like all functions in the \code{trial_} family it contains four additional
arguments:
\itemize{
\item The \code{post_trial_gap} argument is a numeric value specifying the
length of the pause between the current trial ending and the next one
beginning. This parameter overrides any default values defined using the
\code{\link{build_experiment}} function, and a blank screen is displayed
during this gap period.
\item The \code{on_load} and \code{on_finish} arguments can be used to
specify javascript functions that will execute before the trial begins or
after it ends. The javascript code can be written manually and inserted *as*
javascript by using the \code{\link{insert_javascript}} function. However,
the \code{fn_} family of functions supplies a variety of functions that may
be useful in many cases.
\item The \code{data} argument can be used to insert custom data values into
the jsPsych data storage for this trial.
}
}
\subsection{Data}{
When this function is called from R it returns the trial object that will
later be inserted into the experiment when \code{\link{build_experiment}}
is called. However, when the trial runs as part of the experiment it returns
values that are recorded in the jsPsych data store and eventually form part
of the data set for the experiment.
The data recorded by this trial is as follows:
\itemize{
\item The \code{rt} value is the response time in milliseconds taken for the
user to make a response. The time is measured from when the stimulus first
appears on the screen until the response.
\item The \code{response} is the numeric value of the slider bar.
\item The \code{stimulus} variable records the path to the image that was
displayed on this trial.
}
In addition, it records default variables that are recorded by all trials:
\itemize{
\item \code{trial_type} is a string that records the name of the plugin used to run the trial.
\item \code{trial_index} is a number that records the index of the current trial across the whole experiment.
\item \code{time_elapsed} counts the number of milliseconds since the start of the experiment when the trial ended.
\item \code{internal_node_id} is a string identifier for the current "node" in the timeline.
}
}
}
\seealso{
Within the "stimulus-response" family of trials, there are four types of
stimuli (image, audio, video and HTML) and three types of response options
(button, keyboard, slider). The corresponding functions are
\code{\link{trial_image_button_response}},
\code{\link{trial_image_keyboard_response}},
\code{\link{trial_image_slider_response}},
\code{\link{trial_audio_button_response}},
\code{\link{trial_audio_keyboard_response}},
\code{\link{trial_audio_slider_response}},
\code{\link{trial_video_button_response}},
\code{\link{trial_video_keyboard_response}},
\code{\link{trial_video_slider_response}},
\code{\link{trial_html_button_response}},
\code{\link{trial_html_keyboard_response}} and
\code{\link{trial_html_slider_response}}.
}
|
# ~/.Rprofile: use the Tsinghua TUNA mirror as the default CRAN repository.
options("repos" = c(CRAN="https://mirrors.tuna.tsinghua.edu.cn/CRAN/"))
# Coloured prompt variant (ANSI bright blue), kept disabled for reference.
#options(prompt = "\033[1;34mR>\033[0m ")
# Plain "R> " primary prompt and a whitespace-only continuation prompt.
options(prompt = "R> ")
options(continue=" ")
| /.Rprofile | no_license | andy1li/dotfiles | R | false | false | 163 | rprofile | options("repos" = c(CRAN="https://mirrors.tuna.tsinghua.edu.cn/CRAN/"))
#options(prompt = "\033[1;34mR>\033[0m ")
options(prompt = "R> ")
options(continue=" ")
|
# Network-learning and graph-plotting dependencies.
library(bnlearn)
library(igraph)
library(Rgraphviz)
library(sna)
# Loads the object `experiment.missing.data`; indexed below as a
# samples x experiments matrix of lists over noise levels -- TODO confirm.
load('RData/experiment.missing.data.RData')
# Accumulator: per reconstruction method, the best score seen so far and
# the experiment id(s) that attain it (0 / NA mean "nothing seen yet").
find.score = NULL
find.score$capri$id = 0
find.score$capri$score = NA
find.score$caprese$id = 0
find.score$caprese$score = NA
find.score$prim$id = 0
find.score$prim$score = NA
find.score$chowliu$id = 0
find.score$chowliu$score = NA
find.score$edmonds$id = 0
find.score$edmonds$score = NA
find.score$gabow$id = 0
find.score$gabow$score = NA
find.score$scite$score = NA
# Node labels shared by all plots, taken from the first experiment's data.
dataset = experiment.missing.data[[1,1]][[1]]
node.names = colnames(dataset$dataset)
# Plot the directed graph encoded by adjacency matrix `adj` and save it
# as 'plot/<file>.pdf'.
#
# Args:
#   adj:   square adjacency matrix; rows/columns are relabelled with the
#          global `node.names` before plotting.
#   title: title drawn on the plot.
#   file:  base name (without extension) of the output PDF.
#
# Side effects: opens a new graphics device, writes the PDF via
# dev.copy2pdf() and closes the device. Requires igraph and Rgraphviz.
plot_graph <- function(adj, title, file) {
  colnames(adj) <- node.names
  rownames(adj) <- node.names
  # uniform font size for every node label
  fontsize <- rep(20, ncol(adj))
  names(fontsize) <- colnames(adj)
  graph <- graph.adjacency(adj)
  nel <- igraph.to.graphNEL(graph)
  dev.new()
  plot(nel, nodeAttrs = list(fontsize = fontsize))
  title(title)
  dev.copy2pdf(file = paste0('plot/', file, '.pdf'))
  dev.off()
}
# Sweep over all runs (samples x experiments x noise levels) and, for each
# reconstruction method, keep the best (largest) score seen so far.
# A strictly better score replaces the incumbent and plots the winning
# graph; an exact tie appends the experiment id to the method's id vector.
for (sample in 1:nrow(experiment.missing.data)) {
  for (exp in 1:ncol(experiment.missing.data)) {
    exec <- experiment.missing.data[[sample, exp]]
    for (noise in 1:length(exec)) {
      experiment <- exec[[noise]]
      # capri
      if (is.na(find.score$capri$score) || experiment$reconstruction$capri$score > find.score$capri$score) {
        find.score$capri$score <- experiment$reconstruction$capri$score
        find.score$capri$id <- exp
        title <- paste('\nCAPRI score:', round(find.score$capri$score, 3), ' id: ', find.score$capri$id)
        plot_graph(experiment$reconstruction$capri$bic.adj, title, 'capri')
      } else if (experiment$reconstruction$capri$score == find.score$capri$score) {
        find.score$capri$id <- c(find.score$capri$id, exp)
      }
      # caprese
      if (is.na(find.score$caprese$score) || experiment$reconstruction$caprese$score > find.score$caprese$score) {
        find.score$caprese$score <- experiment$reconstruction$caprese$score
        find.score$caprese$id <- exp
        title <- paste('\nCAPRESE score:', round(find.score$caprese$score, 3), ' id: ', find.score$caprese$id)
        # BUG FIX: was `$capre$adj.caprese`, which only resolved through R's
        # partial matching of list names; spelled out in full here.
        plot_graph(experiment$reconstruction$caprese$adj.caprese, title, 'caprese')
      } else if (experiment$reconstruction$caprese$score == find.score$caprese$score) {
        find.score$caprese$id <- c(find.score$caprese$id, exp)
      }
      # prim
      if (is.na(find.score$prim$score) || experiment$reconstruction$prim$score > find.score$prim$score) {
        find.score$prim$score <- experiment$reconstruction$prim$score
        find.score$prim$id <- exp
        title <- paste('\nPRIM score:', round(find.score$prim$score, 3), ' id: ', find.score$prim$id)
        plot_graph(experiment$reconstruction$prim$no.reg.adj, title, 'prim')
      } else if (experiment$reconstruction$prim$score == find.score$prim$score) {
        find.score$prim$id <- c(find.score$prim$id, exp)
      }
      # chowliu
      if (is.na(find.score$chowliu$score) || experiment$reconstruction$chowliu$score > find.score$chowliu$score) {
        find.score$chowliu$score <- experiment$reconstruction$chowliu$score
        find.score$chowliu$id <- exp
        title <- paste('\nCHOW LIU score:', round(find.score$chowliu$score, 3), ' id: ', find.score$chowliu$id)
        plot_graph(experiment$reconstruction$chowliu$loglik.adj, title, 'chowliu')
      } else if (experiment$reconstruction$chowliu$score == find.score$chowliu$score) {
        find.score$chowliu$id <- c(find.score$chowliu$id, exp)
      }
      # edmonds
      if (is.na(find.score$edmonds$score) || experiment$reconstruction$edmonds$score > find.score$edmonds$score) {
        find.score$edmonds$score <- experiment$reconstruction$edmonds$score
        find.score$edmonds$id <- exp
        title <- paste('\nEDMONDS score:', round(find.score$edmonds$score, 3), ' id: ', find.score$edmonds$id)
        plot_graph(experiment$reconstruction$edmonds$pmi.no.reg.adj, title, 'edmonds')
      } else if (experiment$reconstruction$edmonds$score == find.score$edmonds$score) {
        find.score$edmonds$id <- c(find.score$edmonds$id, exp)
      }
      # gabow
      if (is.na(find.score$gabow$score) || experiment$reconstruction$gabow$score > find.score$gabow$score) {
        find.score$gabow$score <- experiment$reconstruction$gabow$score
        find.score$gabow$id <- exp
        title <- paste('\nGABOW score:', round(find.score$gabow$score, 3), ' id: ', find.score$gabow$id)
        plot_graph(experiment$reconstruction$gabow$pmi.no.reg.adj, title, 'gabow')
      } else if (experiment$reconstruction$gabow$score == find.score$gabow$score) {
        find.score$gabow$id <- c(find.score$gabow$id, exp)
      }
    }
  }
}
# Score the SCITE tree: clean up SCITE's GraphViz output, read it back as an
# adjacency matrix, drop one node (presumably SCITE's artificial root --
# TODO confirm) and score the resulting network with bnlearn's log-likelihood.
filename <- paste0('scite_output/datasets/missing/single/58_1_1_ml0')
read <- readLines(paste0(filename, '.gv'))
read <- gsub(' ', '', read)
read <- gsub(';', '', read)
write(paste0(read, collapse = '\n'), file = paste0(filename, '.correct.gv'))
readdot <- read.dot(paste0(filename, '.correct.gv'))
graph <- graph.adjacency(readdot)
scite.tree.raw <- get.adjacency(graph, sparse = FALSE)
# Copy edges into a matrix one node smaller; raw nodes are named "1", "2", ...
scite.tree <- matrix(0, ncol = ncol(scite.tree.raw) - 1, nrow = ncol(scite.tree.raw) - 1)
for (i in seq_len(nrow(scite.tree))) {
  for (j in seq_len(ncol(scite.tree))) {
    if (scite.tree.raw[as.character(i), as.character(j)] == 1) {
      scite.tree[i, j] <- 1
    }
  }
}
dataset <- experiment.missing.data[[1,1]][[1]]
# BUG FIX: was `dataset$reconstructions` (plural), which is NULL -- the
# element is called `reconstruction` everywhere else in this script.
dataset <- dataset$reconstruction$capri$bic.adj
colnames(scite.tree) <- colnames(dataset)
rownames(scite.tree) <- colnames(dataset)
# Loads the object `scite` (genotype matrix) -- TODO confirm contents.
load('RData/scite.RData')
colnames(scite) <- colnames(dataset)
# create the igraph structure
net <- empty.graph(colnames(scite), num = 4)
categoric.dataset <- data.frame(apply(scite, 2, factor))
for (name in colnames(categoric.dataset)) {
  # Levels 0/1/3 appear to match SCITE's genotype encoding -- TODO confirm.
  levels(categoric.dataset[[name]]) <- c(0, 1, 3)
}
amat(net[[1]]) <- scite.tree
find.score$scite$score <- bnlearn::score(net[[1]], categoric.dataset, type='loglik')
title <- paste('\nSCITE score:', round(find.score$scite$score, 3))
plot_graph(scite.tree, title, 'scite') | /old/experiment_missing_data_2/find.score.R | no_license | BIMIB-DISCo/MST | R | false | false | 6,399 | r | library(bnlearn)
library(igraph)
library(Rgraphviz)
library(sna)
load('RData/experiment.missing.data.RData')
find.score = NULL
find.score$capri$id = 0
find.score$capri$score = NA
find.score$caprese$id = 0
find.score$caprese$score = NA
find.score$prim$id = 0
find.score$prim$score = NA
find.score$chowliu$id = 0
find.score$chowliu$score = NA
find.score$edmonds$id = 0
find.score$edmonds$score = NA
find.score$gabow$id = 0
find.score$gabow$score = NA
find.score$scite$score = NA
dataset = experiment.missing.data[[1,1]][[1]]
node.names = colnames(dataset$dataset)
plot_graph = function(adj, title, file) {
colnames(adj) = node.names
rownames(adj) = node.names
fontsize = rep(20, ncol(adj))
names(fontsize) = colnames(adj)
graph = graph.adjacency(adj)
nel = igraph.to.graphNEL(graph)
dev.new()
plot(nel, nodeAttrs = list(fontsize = fontsize))
title(title)
dev.copy2pdf(file = paste0('plot/', file, '.pdf'))
dev.off()
}
for(sample in 1:nrow(experiment.missing.data)) {
for (exp in 1:ncol(experiment.missing.data)) {
exec = experiment.missing.data[[sample, exp]]
for (noise in 1:length(exec)) {
experiment = exec[[noise]]
# capri
if (is.na(find.score$capri$score) || experiment$reconstruction$capri$score > find.score$capri$score) {
find.score$capri$score = experiment$reconstruction$capri$score
find.score$capri$id = exp
title = paste('\nCAPRI score:', round(find.score$capri$score, 3), ' id: ', find.score$capri$id)
plot_graph(experiment$reconstruction$capri$bic.adj, title, 'capri')
} else if (experiment$reconstruction$capri$score == find.score$capri$score) {
find.score$capri$id = c(find.score$capri$id, exp)
}
# caprese
if (is.na(find.score$caprese$score) || experiment$reconstruction$caprese$score > find.score$caprese$score) {
find.score$caprese$score = experiment$reconstruction$caprese$score
find.score$caprese$id = exp
title = paste('\nCAPRESE score:', round(find.score$caprese$score, 3), ' id: ', find.score$caprese$id)
plot_graph(experiment$reconstruction$capre$adj.caprese, title, 'caprese')
} else if (experiment$reconstruction$caprese$score == find.score$caprese$score) {
find.score$caprese$id = c(find.score$caprese$id, exp)
}
# prim
if (is.na(find.score$prim$score) || experiment$reconstruction$prim$score > find.score$prim$score) {
find.score$prim$score = experiment$reconstruction$prim$score
find.score$prim$id = exp
title = paste('\nPRIM score:', round(find.score$prim$score, 3), ' id: ', find.score$prim$id)
plot_graph(experiment$reconstruction$prim$no.reg.adj, title, 'prim')
} else if (experiment$reconstruction$prim$score == find.score$prim$score) {
find.score$prim$id = c(find.score$prim$id, exp)
}
# chowliu
if (is.na(find.score$chowliu$score) || experiment$reconstruction$chowliu$score > find.score$chowliu$score) {
find.score$chowliu$score = experiment$reconstruction$chowliu$score
find.score$chowliu$id = exp
title = paste('\nCHOW LIU score:', round(find.score$chowliu$score, 3), ' id: ', find.score$chowliu$id)
plot_graph(experiment$reconstruction$chowliu$loglik.adj, title, 'chowliu')
} else if (experiment$reconstruction$chowliu$score == find.score$chowliu$score) {
find.score$chowliu$id = c(find.score$chowliu$id, exp)
}
# edmonds
if (is.na(find.score$edmonds$score) || experiment$reconstruction$edmonds$score > find.score$edmonds$score) {
find.score$edmonds$score = experiment$reconstruction$edmonds$score
find.score$edmonds$id = exp
title = paste('\nEDMONDS score:', round(find.score$edmonds$score, 3), ' id: ', find.score$edmonds$id)
plot_graph(experiment$reconstruction$edmonds$pmi.no.reg.adj, title, 'edmonds')
} else if (experiment$reconstruction$edmonds$score == find.score$edmonds$score) {
find.score$edmonds$id = c(find.score$edmonds$id, exp)
}
# gabow
if (is.na(find.score$gabow$score) || experiment$reconstruction$gabow$score > find.score$gabow$score) {
find.score$gabow$score = experiment$reconstruction$gabow$score
find.score$gabow$id = exp
title = paste('\nGABOW score:', round(find.score$gabow$score, 3), ' id: ', find.score$gabow$id)
plot_graph(experiment$reconstruction$gabow$pmi.no.reg.adj, title, 'gabow')
} else if (experiment$reconstruction$gabow$score == find.score$gabow$score) {
find.score$gabow$id = c(find.score$gabow$id, exp)
}
}
}
}
filename = paste0('scite_output/datasets/missing/single/58_1_1_ml0')
read = readLines(paste0(filename, '.gv'))
read = gsub(' ', '', read)
read = gsub(';', '', read)
write(paste0(read, collapse = '\n'), file = paste0(filename, '.correct.gv'))
readdot = read.dot(paste0(filename, '.correct.gv'))
graph = graph.adjacency(readdot)
scite.tree.raw = get.adjacency(graph, sparse = FALSE)
scite.tree = matrix(0, ncol = ncol(scite.tree.raw) - 1, nrow = ncol(scite.tree.raw) - 1)
for (i in 1:nrow(scite.tree)) {
for(j in 1:ncol(scite.tree)) {
if (scite.tree.raw[as.character(i), as.character(j)] == 1) {
scite.tree[i,j] = 1
}
}
}
dataset = experiment.missing.data[[1,1]][[1]]
dataset = dataset$reconstructions$capri$bic.adj
colnames(scite.tree) = colnames(dataset)
rownames(scite.tree) = colnames(dataset)
load('RData/scite.RData')
colnames(scite) = colnames(dataset)
# create the igraph structure
net = empty.graph(colnames(scite), num = 4)
categoric.dataset = data.frame(apply(scite, 2, factor))
for (name in colnames(categoric.dataset)) {
levels(categoric.dataset[[name]]) = c(0,1,3)
}
amat(net[[1]]) = scite.tree
find.score$scite$score = bnlearn::score(net[[1]], categoric.dataset, type='loglik')
title = paste('\nSCITE score:', round(find.score$scite$score, 3))
plot_graph(scite.tree, title, 'scite') |
##
## Script name: nuts.R
##
## Purpose of script: implements the No-U-Turn sampler by Hoffman and Gelman (2014) with dual averaging and mass matrix adaption for
## Bayesian Conditional transformation models; imitates the output style of
# rstan (Stan Development Team, 2020) and uses mass matrix adaptation from adnuts (Monnahan and Kristensen, 2018)
##
## Author: Manuel Carlan
##
## Date Created: 2020-10-7
##
## Email: mcarlan@uni-goettingen.de
##
## ---------------------------
## Run the No-U-Turn sampler (Hoffman and Gelman, 2014, Algorithm 6) with
## dual-averaging step-size adaptation and windowed diagonal mass-matrix
## adaptation (windowing scheme adapted from the adnuts package), plus a
## Gibbs step for the smoothing variances tau2 of the penalty groups.
##
## Arguments:
##   n_iter        total number of iterations (including warmup)
##   xx            model list: design matrices (X, Xp), penalty matrices
##                 (Ks_f), hyperparameters (a, b), penalty-group bookkeeping
##                 (pen_ident, ranks, K_inds, npen, n_coef, ...)
##   f, gr, ll     log-posterior, its gradient, and log-likelihood; each is
##                 called as fun(beta, xx)
##   start         initial coefficient vector
##   warmup        number of adaptation iterations (default n_iter/2)
##   thin          keep every thin-th draw
##   seed          optional RNG seed
##   chain         chain id; only a single chain is supported
##   nuts_settings list of tuning options (adapt_delta, metric, step_size,
##                 adapt_M, max_treedepth, init_buffer, term_buffer, window)
##   fixed, prior_settings  accepted but not used in this function body --
##                 TODO confirm whether callers rely on them
##
## Returns a list with the (thinned) draws `beta` and `tau2`, log-likelihood
## and log-posterior traces, rstan-style per-iteration sampler diagnostics,
## and timing information.
NUTS <- function(n_iter, xx, f, gr, ll, start, warmup = floor(n_iter/2),thin=1,
seed = NULL, chain = 1, nuts_settings, fixed = NULL,
prior_settings){
its <- n_iter
if(!is.null(seed)) set.seed(seed)
if(chain != 1) stop("currently no parallel chains allowed")
n_coef <- length(start)
# supplement missing defaults
if(!is.null(nuts_settings)){
default_control <- list(adapt_delta = 0.8, metric = NULL, step_size = NULL, adapt_M = TRUE, max_treedepth=12, init_buffer = 75, term_buffer = 50, window = 25)
nuts_settings <- modifyList(default_control, nuts_settings)
}
# number of coefficient
n_coef <- length(start)
max_td <- nuts_settings$max_treedepth
adapt_delta <- nuts_settings$adapt_delta
adapt_M <- nuts_settings$adapt_M
M <- nuts_settings$metric
# default metric: identity (unit diagonal mass matrix)
if(is.null(M)) M <- rep(1, n_coef)
if(!is.vector(M)) stop("only diagonal mass matrices (with positive diagonal entries) allowed")
## BCTM uses stan default values for sampler warmup (https://mc-stan.org/docs/2_26/reference-manual/hmc-algorithm-parameters.html)
interval1 <- nuts_settings$init_buffer
interval2 <- nuts_settings$term_buffer
interval3 <- nuts_settings$window
aws <- interval2 # adapt window size
anw <- interval1 + interval2 # adapt next window
if(warmup < (interval1+interval2+interval3) & adapt_M){
warning("Specified warmup-up phase is too short for adaption.")
adapt_M <- FALSE
}
# Transform the problem so the kinetic energy uses an identity metric;
# f2/gr2/ll2 act on the rescaled parameters beta_cur.
rotation <- rotate_rescale(f = f, gr = gr, ll = ll ,xx = xx, M=M, beta_y = start)
n_coef <- xx[["n_coef"]]
n_pen_grps <- xx[["npen"]]
Ks_f <- xx[["Ks_f"]]
hyper_a <- xx[["hyperparams"]][["a"]]
hyper_b <- xx[["hyperparams"]][["b"]]
ranks <- xx$ranks
pen_ident <- xx$pen_ident
# Smats <- vector("list", 2)
# n_pen_grpss <- xx$npen
# Ks_new <- vector("list", npen)
step_size <- nuts_settings$step_size
f2 <- rotation$f2 # posterior
gr2 <- rotation$gr2
ll2 <- rotation$ll2
# obtain rotated and rescaled parameters
beta_cur <- rotation$beta_cur
# square metric
M_sq <- rotation$M_sq
# sampler params as in rstan
sampler_params <- matrix(0, its, 6)
xx$Xpt <- t(xx$Xp)
## how many steps were taken at each iteration, useful for tuning
# eps_start <- 0.01
# should dual averaging be applied (default TRUE)
dual_averaging <- is.null(step_size)
if(dual_averaging){
# Dual-averaging state (Hoffman & Gelman 2014, Algorithm 6 notation):
# mu, gamma, t0, kappa are the standard tuning constants.
H_bar <- eps_out <- eps_bar <- rep(NA, length = warmup+1)
step_size <- eps_out[1] <- eps_bar[1] <- find_reasonable_epsilon(beta = beta_cur, f = f2, gr = gr2, xx = xx)
mu <- log(10*step_size)
H_bar[1] <- 0
gamma <- 0.05
t0 <- 10
kappa <- 0.75
} else {
## dummy values to return
eps_out <- eps_bar <- H_bar <- NULL
}
# xx <- as.environment(xx)
# Benchmark one posterior and one gradient evaluation so the user can judge
# the per-iteration cost before sampling starts.
mbm <- microbenchmark::microbenchmark(
"posterior" = {
b <- f(runif(n_coef), xx)
},
"gradient" = {
b <- gr(runif(n_coef), xx)
})
print(mbm)
j_out <- rep(0, n_iter)
# message('')
# message(paste('Starting NUTS at', start))
Ks_t <- Ks_f
# names(Ks_t) <- names(pen_ident)
K_inds <- xx$K_inds
labels <- xx$labels
npen <- xx$npen
## beta are the position parameters
## r are the momentum parameters
beta_out <- matrix(0, nrow=n_iter, ncol=n_coef)
colnames(beta_out) <- colnames(xx$X)
log_liks <- log_posteriors <- rep(0, len=n_iter)
tau2_out <- matrix(1, nrow=n_iter, ncol=n_pen_grps)
colnames(tau2_out) <- unlist( lapply(K_inds, function(x) paste0("tau2_", x)))
# which(eff_pen[1] == name_groups)
# lapply( names(pen_ident))
# inds[[1]] <-
pb <- progress::progress_bar$new(format = "[:bar] :current/:total (:percent)", total = n_iter)
pb$tick(0)
tau2 <- rep(0, npen)
# inds <- 1:n_coef
# Ks <- vector(mode="list", length=n_tau)
start <- Sys.time()
for(iter in 1:n_iter){
# sourceCpp(paste0(sourcepath,"rcpp/gauss_hmc_update5.cpp"))
beta_minus <- beta_plus <- beta_cur
# store the draw on the original (unrotated) scale
beta_out[iter,] <- M_sq*beta_cur
log_liks[iter] <- if(iter == 1) ll2(beta_cur, xx) else log_liks[iter-1]
log_posteriors[iter] <- if(iter == 1) ll2(beta_cur, xx) else log_posteriors[iter-1]
# fresh standard-normal momentum (identity metric after rescaling)
r_cur <- r_plus <- r_minus <- rnorm(n_coef, 0, 1)
H_prime <- calculate_hamiltonian(beta = beta_cur, xx = xx, r = r_cur, f = f2)
## slicing step
log_u <- log(runif(1)) + calculate_hamiltonian(beta=beta_cur, xx=xx,r=r_cur, f=f2)
j <- 0
n <- 1
s <- 1
div <- 0
# info is an environment so build_tree can report call counts/divergences
info <- as.environment(list(n.calls=0, div=0))
# Double the trajectory until a U-turn, a divergence, or max_treedepth.
while(s==1) {
#print(j)
# choose a direction v
v <- sample(c(1,-1), 1)
if(v==1){
temp <- build_tree(beta = beta_plus, xx = xx,r = r_plus, log_u = log_u, v = v,
j = j, step_size = step_size, H_prime = H_prime,
f = f2, gr =gr2, info = info)
beta_plus <- temp$beta_plus
r_plus <- temp$r_plus
} else {
temp <- build_tree(beta = beta_minus, xx = xx,r = r_minus, log_u = log_u, v = v,
j = j, step_size = step_size, H_prime = H_prime,
f = f2, gr =gr2, info=info)
beta_minus <- temp$beta_minus
r_minus <- temp$r_minus
}
if(!is.finite(temp$s)) temp$s <- 0
if(temp$s==1) {
# Multinomial-style acceptance of the subtree's proposal.
if(runif(1) <= temp$n/n){
beta_cur <- temp$beta_prime
log_liks[iter] <- ll2(beta_cur, xx)
log_posteriors[iter] <- f2(beta_cur, xx)
# beta_out is on the rotated scale
beta_out[iter,] <- M_sq*beta_cur
}
}
# end if
n <- n + temp$n
s <- temp$s*check_nuts(beta_plus, beta_minus, r_plus, r_minus)
#print(paste0("s: ", s))
j <- j+1
if(!is.finite(s)) s <- 0
if(j >= max_td) {
warning(paste0("max_treedepth(", max_td, ") reached"))
break
}
}
j_out[iter] <- j-1
# mean Metropolis acceptance statistic of the final doubling
alpha2 <- temp$alpha/temp$n_alpha
if(!is.finite(alpha2)) alpha2 <- 0
if(dual_averaging){
if(iter <= warmup){
H_bar[iter+1] <- (1-1/(iter + t0))*H_bar[iter] + (adapt_delta - alpha2)/(iter + t0)
# set logeps and logepsbar
logeps <- mu - sqrt(iter)*H_bar[iter+1]/gamma
eps_out[iter+1] <- exp(logeps)
logepsbar <- iter^(-kappa)*logeps + (1-iter^(-kappa))*log(eps_bar[iter])
eps_bar[iter+1] <- exp(logepsbar)
step_size <- eps_out[iter+1]
} else {
# after warmup, freeze the step size at the averaged value
step_size <- eps_bar[warmup]
# step_size <- eps_out[warmup]
if(iter== warmup +1) print(paste("step size after warmup: ",step_size))
}
}
# this is from adnuts ----------------------------------------------
# Windowed diagonal mass-matrix adaptation (Welford running variance);
# at each window end the metric is refreshed and the problem re-rotated.
if(adapt_M & .slow_phase(iter, warmup, interval1, interval3)){
if(iter== interval1){
m1 <- beta_out[iter,]
s1 <- rep(0, len=n_coef)
k <- 1
} else if(iter==anw){
M <- as.numeric(s1/(k-1)) # estimated variance
rotation <- rotate_rescale(f=f, xx=xx,gr=gr, ll=ll, M=M, beta_y=beta_out[iter,])
f2 <- rotation$f2
gr2 <- rotation$gr2
ll2 <- rotation$ll2
M_sq <- rotation$M_sq
beta_cur <- rotation$beta_cur
## Reset the running variance calculation
k <- 1
s1 <- rep(0, n_coef)
m1 <- beta_out[iter,]
aws <- 2*aws
anw <- .compute_next_window(iter, anw, warmup, interval1, aws, interval3)
step_size <- find_reasonable_epsilon(beta = beta_cur, xx = xx,f = f2, gr = gr2)
} else {
k <- k+1; m0 <- m1; s0 <- s1
m1 <- m0+(beta_out[iter,]-m0)/k
s1 <- s0+(beta_out[iter,]-m0)*(beta_out[iter,]-m1)
}
}
#---------------------------------------------------------------------------
# update tau2s and multiply with corresponding precision matrix
# (conjugate inverse-gamma Gibbs step per penalty group)
for(i in 1:npen){
grp <- pen_ident[[i]]
par <- beta_out[iter, grp ]
tau2_out[iter,i] <- tau2 <- rinvgamma(1, hyper_a + 0.5*ranks[[i]], hyper_b + as.vector(0.5*t(par)%*%(Ks_f[[i]]%*%par)))
Ks_t[[i]] <- Ks_f[[i]]/ tau2
}
# rebuild the block-diagonal prior precision used by f/gr via xx$S
S <- as.matrix(bdiag(lapply(K_inds, function(x) Reduce("+", Ks_t[x]))))
xx$S <- S
sampler_params[iter,] <- c(alpha2, step_size, j, info$n.calls, info$div, f2(beta_cur, xx))
pb$tick(1)
if(iter==warmup) time.warmup <- difftime(Sys.time(), start, units='secs')
.print.mcmc.progress(iter, n_iter, warmup, chain)
}
# thin the stored draws and diagnostics
beta_out <- beta_out[seq(1, nrow(beta_out), by=thin),]
warmup <- warmup/thin
colnames(sampler_params) <- c(NULL, "accept_stat", "step_size", "treedepth", "n_leapfrog", "divergent", "energy")
sampler_params <- as_tibble(sampler_params)
# this is from adnuts/Rstan--------------------------------------------------------------------------------------------
sampler_params <- sampler_params[seq(1, nrow(sampler_params), by=thin),]
ndiv <- sum(sampler_params[-(1:warmup),5])
if(ndiv>0) message(paste0("There were ", ndiv, " divergent transitions after warmup"))
msg <- paste0("Final acceptance ratio=", sprintf("%.2f", colMeans(sampler_params[-(1:warmup),1])))
if(dual_averaging) msg <- paste0(msg,", and target=", adapt_delta)
message(msg)
if(dual_averaging) message(paste0("Final step size=", round(step_size, 3),
"; after ", warmup, " warmup iterations"))
time.total <- difftime(Sys.time(), start, units='secs')
.print.mcmc.timing(time.warmup=time.warmup, time.total=time.total)
#-----------------------------------------------------------------------------------------------------------------
list(beta = beta_out, tau2 = tau2_out, log_liks = log_liks, lp = log_posteriors, sampler_params = sampler_params,
time.total = time.total, time.warmup = time.warmup,
warmup = warmup, max_treedepth = max_td)
}
# log likelihoods (only used for calculation of ICs)
# Gaussian reference log-likelihood (used only for information criteria).
#
# param: coefficient vector; entries indexed by xx$exp_ident (0-based) are
#        stored on the log scale and are exponentiated first.
# xx:    model list providing X (design matrix), Xp (derivative design
#        matrix) and exp_ident (0-based indices of log-scale coefficients).
# Returns sum(log dnorm(X %*% b)) + sum(log(Xp %*% b)), the transformation
# model log-likelihood with a standard normal reference distribution.
ll_gauss <- function(param, xx){
  bt <- param
  # exp_ident is stored 0-based; shift to R's 1-based indexing
  exp_ident <- xx$exp_ident+1
  bt[exp_ident] <- exp(bt[exp_ident])
  sum(dnorm(xx$X%*%bt, log=TRUE)) + sum(log(xx$Xp%*%bt))
}
# Logistic reference log-likelihood (used only for information criteria).
# Same contract as ll_gauss, but with a standard logistic reference
# distribution instead of the standard normal.
ll_logit <- function(param, xx){
  bt <- param
  # exp_ident is stored 0-based; shift to R's 1-based indexing
  exp_ident <- xx$exp_ident+1
  bt[exp_ident] <- exp(bt[exp_ident])
  sum(dlogis(xx$X%*%bt, log=TRUE)) + sum(log(xx$Xp%*%bt))
}
# Minimum-extreme-value reference log-likelihood (used only for information
# criteria). Same contract as ll_gauss, but with the minimum extreme value
# reference density bctm_dmev (defined below).
ll_mev <- function(param, xx){
  bt <- param
  # exp_ident is stored 0-based; shift to R's 1-based indexing
  exp_ident <- xx$exp_ident+1
  bt[exp_ident] <- exp(bt[exp_ident])
  sum(bctm_dmev(xx$X%*%bt, log=TRUE)) + sum(log(xx$Xp%*%bt))
}
# minimum-extreme-value density
# Density of the standard minimum extreme value distribution:
# f(x) = exp(x - exp(x)). With log = TRUE the log-density x - exp(x) is
# returned instead. Vectorized over x.
bctm_dmev <- function(x, log = FALSE) {
  ret <- x - exp(x)
  if (!log) return(exp(ret))
  ret
}
# minimum-extreme-value cdf
bctm_pmev <- function(x) {
  # P(X <= x) for the standard minimum extreme value distribution
  survival <- exp(-exp(x))
  1 - survival
}
calculate_hamiltonian <- function(beta, xx, r, f) {
  # log target density (potential term) minus kinetic energy of momentum r
  log_density <- f(beta, xx)
  kinetic_energy <- 0.5 * sum(r^2)
  log_density - kinetic_energy
}
# U-turn criterion of Hoffman and Gelman (2014): the trajectory may be
# extended only while both endpoint momenta still point away from each other
# along the line connecting the two endpoints. Returns TRUE to continue.
check_nuts <- function(beta_plus, beta_minus, r_plus, r_minus){
  # compute the endpoint difference once instead of twice
  span <- beta_plus - beta_minus
  (crossprod(span, r_minus) >= 0) && (crossprod(span, r_plus) >= 0)
}
# Rescale the sampling problem by the diagonal metric M so that NUTS can use
# an identity mass matrix: wraps f/gr/ll to act on rescaled coefficients and
# returns the start value transformed onto that scale.
#
# Returns a list with the wrapped functions (f2, gr2, ll2), the rescaled
# start value (beta_cur) and the square root of the metric (M_sq).
rotate_rescale <- function(f, gr, ll, xx, M, beta_y){
  if (!is.vector(M)) {
    stop("only diagonal mass matrices (with positive diagonal entries) allowed")
  }
  scale_vec <- sqrt(M)
  # wrappers map from the rescaled scale back to the original one
  f2 <- function(beta, xx) f(scale_vec * beta, xx)
  gr2 <- function(beta, xx) as.vector(gr(scale_vec * beta, xx)) * scale_vec
  ll2 <- function(beta, xx) ll(scale_vec * beta, xx)
  # inverse transformation for the start value
  beta_start <- (1/scale_vec) * beta_y
  list(f2 = f2, gr2 = gr2, ll2 = ll2, beta_cur = beta_start, M_sq = scale_vec)
}
# this is the FindReasonableEpsilon() in Hoffman and Gelman (2014)
# Heuristic for the initial leapfrog step size: FindReasonableEpsilon() from
# Hoffman and Gelman (2014), Algorithm 4. Starting from step size 1, the
# step size is repeatedly doubled (a = 1) or halved (a = -1) until the
# acceptance probability of a single leapfrog step crosses 0.5.
# Stochastic: draws a fresh momentum, so results vary between calls.
find_reasonable_epsilon <- function(beta, f, gr, xx){
#initialize
step_size <- 1
r <- rnorm(length(beta))
##leapfrog step
lf <- leapfrog_step(beta, r, step_size, gr, xx)
beta_prime <- lf$beta_prime
r_prime <- lf$r_prime
H <- calculate_hamiltonian(beta = beta, xx=xx, r=r, f=f)
H_prime <- calculate_hamiltonian(beta = beta_prime, xx=xx, r=r_prime, f=f)
# a = +1 if the acceptance ratio exceeds 1/2 (step size too small), else -1
a <- 2*(exp(H_prime)/exp(H) > 0.5)-1
if(!is.finite(a)) a <- -1
k <- 1
# keep doubling/halving while the acceptance ratio stays on the same side
# of 1/2 (condition written on the log scale: a*(H' - H) > -a*log 2)
while (!is.finite(H) | !is.finite(H_prime) | a*H_prime-a*H > -a*log(2)) {
step_size <- (2^a)*step_size
lf <- leapfrog_step(beta, r, step_size, gr, xx)
beta_prime <- lf$beta_prime
r_prime <- lf$r_prime
H_prime <- calculate_hamiltonian(beta = beta_prime, xx = xx, r = r_prime, f = f)
k <- k + 1
# safety valve against non-termination
if(k > 500) stop("Could not find reasonable epsilon in 500 iterations")
}
# if(verbose) message(paste("Reasonable epsilon=", step_size, "found after", k, "steps"))
return(invisible(step_size))
}
# generic leapfrog update
# One leapfrog (Stoermer-Verlet) integration step for Hamiltonian dynamics:
# half-step momentum update, full-step position update, half-step momentum
# update.
#
# beta:      current position, r: current momentum
# step_size: integration step size (may be negative to integrate backwards)
# gr:        gradient of the log target, called as gr(beta, xx)
# Returns list(beta_prime, r_prime) with the updated position and momentum.
leapfrog_step <- function(beta, r, step_size, gr, xx){
  r_half <- r + 0.5 * step_size * gr(beta, xx)
  beta_prime <- beta + step_size * r_half
  r_prime <- r_half + 0.5 * step_size * gr(beta_prime, xx)
  list(beta_prime = beta_prime, r_prime = r_prime)
}
# Recursive trajectory doubling: BuildTree() from Hoffman and Gelman (2014),
# Algorithm 6.
#
# beta, r:    position/momentum at the current trajectory end
# log_u:      log of the slice variable
# v:          direction of integration (+1 forward, -1 backward)
# j:          tree depth (j = 0 is a single leapfrog step)
# step_size:  leapfrog step size
# H_prime:    Hamiltonian at the trajectory start (for acceptance statistics)
# delta_max:  divergence threshold on the Hamiltonian error
# info:       environment used to report divergences back to the caller
#
# Returns a list with the two trajectory endpoints (beta_minus/beta_plus and
# the matching momenta), the proposal beta_prime, the continue flag s, the
# number of valid points n, and the acceptance statistics alpha/n_alpha.
build_tree <- function(beta, xx, r, log_u, v, j, step_size, H_prime, f, gr,
delta_max=1000, info = environment() ){
# print(diag(S))
if(j==0){
# base case: one leapfrog step in direction v
lf <- leapfrog_step(beta, r, v*step_size, gr, xx)
beta <- lf$beta_prime
r <- lf$r_prime
H <- calculate_hamiltonian(beta=beta, xx=xx, r=r, f=f)
# point is valid if it lies inside the slice
n <- log_u <= H
s <- log_u < delta_max + H
if(!is.finite(H) | s == 0){
# mark the divergent transition for the caller's diagnostics
info$div <- 1
s <- 0
}
log_alpha <- H-H_prime
alpha <- min(exp(log_alpha), 1)
return(list(beta_minus=beta, beta_plus=beta, beta_prime=beta, r_minus=r,
r_plus=r, s=s, n=n, alpha=alpha, n_alpha=1))
} else {
## recursion - build left and right branches
branch1 <- build_tree(beta=beta, xx=xx, r=r, log_u=log_u, v=v, j=j-1, step_size=step_size,
H_prime=H_prime, f=f, gr=gr, info=info)
beta_minus <- branch1$beta_minus
beta_plus <- branch1$beta_plus
beta_prime <- branch1$beta_prime
r_minus <- branch1$r_minus
r_plus <- branch1$r_plus
alpha <- branch1$alpha
n_alpha <- branch1$n_alpha
s <- branch1$s
if(!is.finite(s)) s <- 0
nprime <- branch1$n
# only build the second half if the first half did not stop
if(s==1){
if(v== -1){
branch2 <- build_tree(beta = beta_minus, xx = xx, r = r_minus, log_u = log_u, v=v, j=j-1, step_size=step_size, H_prime = H_prime, f=f, gr=gr, info=info)
beta_minus <- branch2$beta_minus
r_minus <- branch2$r_minus
} else {
branch2 <- build_tree(beta=beta_plus, xx=xx, r =r_plus, log_u = log_u, v=v, j=j-1, step_size=step_size, H_prime = H_prime, f=f, gr=gr, info=info)
beta_plus <- branch2$beta_plus
r_plus <- branch2$r_plus
}
nprime <- branch2$n+ branch1$n
if(!is.finite(nprime)) nprime <- 0
## acceptance step
# take the second subtree's proposal with probability n2/(n1+n2)
if(nprime>0){
if(runif(1) <= branch2$n/nprime){
beta_prime <- branch2$beta_prime
alpha <- branch1$alpha+branch2$alpha
n_alpha <- branch1$n_alpha+branch2$n_alpha
}
}
# check if proposal is valid
b <- check_nuts(beta_plus = beta_plus, beta_minus=beta_minus,
r_plus = r_plus, r_minus = r_minus)
s <- branch2$s*b
}
return(list(beta_minus =beta_minus, beta_plus = beta_plus, beta_prime=beta_prime,
r_minus = r_minus, r_plus = r_plus,
s = s, n = nprime,
alpha = alpha, n_alpha=n_alpha))
}
}
| /nuts/nuts.R | no_license | manucarl/bctm_showcase | R | false | false | 16,182 | r | ##
## Script name: nuts.R
##
## Purpose of script: implements the No-U-Turn sampler by Hoffmann and Gelman (2014) with dual averaging and mass matrix adaption for
## Bayesian Conditional transformation models; imitates the output style of
# rstan (Stan Development Team, 2020) and uses mass matrix adaptation from adnuts (Monnahan and Kristensen, 2018)
##
## Author: Manuel Carlan
##
## Date Created: 2020-10-7
##
## Email: mcarlan@uni-goettingen.de
##
## ---------------------------
NUTS <- function(n_iter, xx, f, gr, ll, start, warmup = floor(n_iter/2),thin=1,
seed = NULL, chain = 1, nuts_settings, fixed = NULL,
prior_settings){
its <- n_iter
if(!is.null(seed)) set.seed(seed)
if(chain != 1) stop("currently no parallel chains allowed")
n_coef <- length(start)
# supplement missing defaults
if(!is.null(nuts_settings)){
default_control <- list(adapt_delta = 0.8, metric = NULL, step_size = NULL, adapt_M = TRUE, max_treedepth=12, init_buffer = 75, term_buffer = 50, window = 25)
nuts_settings <- modifyList(default_control, nuts_settings)
}
# number of coefficient
n_coef <- length(start)
max_td <- nuts_settings$max_treedepth
adapt_delta <- nuts_settings$adapt_delta
adapt_M <- nuts_settings$adapt_M
M <- nuts_settings$metric
if(is.null(M)) M <- rep(1, n_coef)
if(!is.vector(M)) stop("only diagonal mass matrices (with positive diagonal entries) allowed")
## BCTM uses stan default values for sampler warmup (https://mc-stan.org/docs/2_26/reference-manual/hmc-algorithm-parameters.html)
interval1 <- nuts_settings$init_buffer
interval2 <- nuts_settings$term_buffer
interval3 <- nuts_settings$window
aws <- interval2 # adapt window size
anw <- interval1 + interval2 # adapt next window
if(warmup < (interval1+interval2+interval3) & adapt_M){
warning("Specified warmup-up phase is too short for adaption.")
adapt_M <- FALSE
}
rotation <- rotate_rescale(f = f, gr = gr, ll = ll ,xx = xx, M=M, beta_y = start)
n_coef <- xx[["n_coef"]]
n_pen_grps <- xx[["npen"]]
Ks_f <- xx[["Ks_f"]]
hyper_a <- xx[["hyperparams"]][["a"]]
hyper_b <- xx[["hyperparams"]][["b"]]
ranks <- xx$ranks
pen_ident <- xx$pen_ident
# Smats <- vector("list", 2)
# n_pen_grpss <- xx$npen
# Ks_new <- vector("list", npen)
step_size <- nuts_settings$step_size
f2 <- rotation$f2 # posterior
gr2 <- rotation$gr2
ll2 <- rotation$ll2
# obtain rotated and rescaled parameters
beta_cur <- rotation$beta_cur
# square metric
M_sq <- rotation$M_sq
# sampler params as in rstan
sampler_params <- matrix(0, its, 6)
xx$Xpt <- t(xx$Xp)
## how many steps were taken at each iteration, useful for tuning
# eps_start <- 0.01
# should dual averaging be applied (default TRUE)
dual_averaging <- is.null(step_size)
if(dual_averaging){
H_bar <- eps_out <- eps_bar <- rep(NA, length = warmup+1)
step_size <- eps_out[1] <- eps_bar[1] <- find_reasonable_epsilon(beta = beta_cur, f = f2, gr = gr2, xx = xx)
mu <- log(10*step_size)
H_bar[1] <- 0
gamma <- 0.05
t0 <- 10
kappa <- 0.75
} else {
## dummy values to return
eps_out <- eps_bar <- H_bar <- NULL
}
# xx <- as.environment(xx)
mbm <- microbenchmark::microbenchmark(
"posterior" = {
b <- f(runif(n_coef), xx)
},
"gradient" = {
b <- gr(runif(n_coef), xx)
})
print(mbm)
j_out <- rep(0, n_iter)
# message('')
# message(paste('Starting NUTS at', start))
Ks_t <- Ks_f
# names(Ks_t) <- names(pen_ident)
K_inds <- xx$K_inds
labels <- xx$labels
npen <- xx$npen
## beta are the position parameters
## r are the momentum parameters
beta_out <- matrix(0, nrow=n_iter, ncol=n_coef)
colnames(beta_out) <- colnames(xx$X)
log_liks <- log_posteriors <- rep(0, len=n_iter)
tau2_out <- matrix(1, nrow=n_iter, ncol=n_pen_grps)
colnames(tau2_out) <- unlist( lapply(K_inds, function(x) paste0("tau2_", x)))
# which(eff_pen[1] == name_groups)
# lapply( names(pen_ident))
# inds[[1]] <-
pb <- progress::progress_bar$new(format = "[:bar] :current/:total (:percent)", total = n_iter)
pb$tick(0)
tau2 <- rep(0, npen)
# inds <- 1:n_coef
# Ks <- vector(mode="list", length=n_tau)
start <- Sys.time()
for(iter in 1:n_iter){
# sourceCpp(paste0(sourcepath,"rcpp/gauss_hmc_update5.cpp"))
beta_minus <- beta_plus <- beta_cur
beta_out[iter,] <- M_sq*beta_cur
log_liks[iter] <- if(iter == 1) ll2(beta_cur, xx) else log_liks[iter-1]
log_posteriors[iter] <- if(iter == 1) ll2(beta_cur, xx) else log_posteriors[iter-1]
r_cur <- r_plus <- r_minus <- rnorm(n_coef, 0, 1)
H_prime <- calculate_hamiltonian(beta = beta_cur, xx = xx, r = r_cur, f = f2)
## slicing step
log_u <- log(runif(1)) + calculate_hamiltonian(beta=beta_cur, xx=xx,r=r_cur, f=f2)
j <- 0
n <- 1
s <- 1
div <- 0
info <- as.environment(list(n.calls=0, div=0))
while(s==1) {
#print(j)
# choose a direction v
v <- sample(c(1,-1), 1)
if(v==1){
temp <- build_tree(beta = beta_plus, xx = xx,r = r_plus, log_u = log_u, v = v,
j = j, step_size = step_size, H_prime = H_prime,
f = f2, gr =gr2, info = info)
beta_plus <- temp$beta_plus
r_plus <- temp$r_plus
} else {
temp <- build_tree(beta = beta_minus, xx = xx,r = r_minus, log_u = log_u, v = v,
j = j, step_size = step_size, H_prime = H_prime,
f = f2, gr =gr2, info=info)
beta_minus <- temp$beta_minus
r_minus <- temp$r_minus
}
if(!is.finite(temp$s)) temp$s <- 0
if(temp$s==1) {
if(runif(1) <= temp$n/n){
beta_cur <- temp$beta_prime
log_liks[iter] <- ll2(beta_cur, xx)
log_posteriors[iter] <- f2(beta_cur, xx)
# beta_out is on the rotated scale
beta_out[iter,] <- M_sq*beta_cur
}
}
# end if
n <- n + temp$n
s <- temp$s*check_nuts(beta_plus, beta_minus, r_plus, r_minus)
#print(paste0("s: ", s))
j <- j+1
if(!is.finite(s)) s <- 0
if(j >= max_td) {
warning(paste0("max_treedepth(", max_td, ") reached"))
break
}
}
j_out[iter] <- j-1
alpha2 <- temp$alpha/temp$n_alpha
if(!is.finite(alpha2)) alpha2 <- 0
if(dual_averaging){
if(iter <= warmup){
H_bar[iter+1] <- (1-1/(iter + t0))*H_bar[iter] + (adapt_delta - alpha2)/(iter + t0)
# set logeps and logepsbar
logeps <- mu - sqrt(iter)*H_bar[iter+1]/gamma
eps_out[iter+1] <- exp(logeps)
logepsbar <- iter^(-kappa)*logeps + (1-iter^(-kappa))*log(eps_bar[iter])
eps_bar[iter+1] <- exp(logepsbar)
step_size <- eps_out[iter+1]
} else {
step_size <- eps_bar[warmup]
# step_size <- eps_out[warmup]
if(iter== warmup +1) print(paste("step size after warmup: ",step_size))
}
}
# this is from adnuts ----------------------------------------------
if(adapt_M & .slow_phase(iter, warmup, interval1, interval3)){
if(iter== interval1){
m1 <- beta_out[iter,]
s1 <- rep(0, len=n_coef)
k <- 1
} else if(iter==anw){
M <- as.numeric(s1/(k-1)) # estimated variance
rotation <- rotate_rescale(f=f, xx=xx,gr=gr, ll=ll, M=M, beta_y=beta_out[iter,])
f2 <- rotation$f2
gr2 <- rotation$gr2
ll2 <- rotation$ll2
M_sq <- rotation$M_sq
beta_cur <- rotation$beta_cur
## Reset the running variance calculation
k <- 1
s1 <- rep(0, n_coef)
m1 <- beta_out[iter,]
aws <- 2*aws
anw <- .compute_next_window(iter, anw, warmup, interval1, aws, interval3)
step_size <- find_reasonable_epsilon(beta = beta_cur, xx = xx,f = f2, gr = gr2)
} else {
k <- k+1; m0 <- m1; s0 <- s1
m1 <- m0+(beta_out[iter,]-m0)/k
s1 <- s0+(beta_out[iter,]-m0)*(beta_out[iter,]-m1)
}
}
#---------------------------------------------------------------------------
# update tau2s and multiply with corresponding precision matrix
for(i in 1:npen){
grp <- pen_ident[[i]]
par <- beta_out[iter, grp ]
tau2_out[iter,i] <- tau2 <- rinvgamma(1, hyper_a + 0.5*ranks[[i]], hyper_b + as.vector(0.5*t(par)%*%(Ks_f[[i]]%*%par)))
Ks_t[[i]] <- Ks_f[[i]]/ tau2
}
S <- as.matrix(bdiag(lapply(K_inds, function(x) Reduce("+", Ks_t[x]))))
xx$S <- S
sampler_params[iter,] <- c(alpha2, step_size, j, info$n.calls, info$div, f2(beta_cur, xx))
pb$tick(1)
if(iter==warmup) time.warmup <- difftime(Sys.time(), start, units='secs')
.print.mcmc.progress(iter, n_iter, warmup, chain)
}
beta_out <- beta_out[seq(1, nrow(beta_out), by=thin),]
warmup <- warmup/thin
colnames(sampler_params) <- c(NULL, "accept_stat", "step_size", "treedepth", "n_leapfrog", "divergent", "energy")
sampler_params <- as_tibble(sampler_params)
# this is from adnuts/Rstan--------------------------------------------------------------------------------------------
sampler_params <- sampler_params[seq(1, nrow(sampler_params), by=thin),]
ndiv <- sum(sampler_params[-(1:warmup),5])
if(ndiv>0) message(paste0("There were ", ndiv, " divergent transitions after warmup"))
msg <- paste0("Final acceptance ratio=", sprintf("%.2f", colMeans(sampler_params[-(1:warmup),1])))
if(dual_averaging) msg <- paste0(msg,", and target=", adapt_delta)
message(msg)
if(dual_averaging) message(paste0("Final step size=", round(step_size, 3),
"; after ", warmup, " warmup iterations"))
time.total <- difftime(Sys.time(), start, units='secs')
.print.mcmc.timing(time.warmup=time.warmup, time.total=time.total)
#-----------------------------------------------------------------------------------------------------------------
list(beta = beta_out, tau2 = tau2_out, log_liks = log_liks, lp = log_posteriors, sampler_params = sampler_params,
time.total = time.total, time.warmup = time.warmup,
warmup = warmup, max_treedepth = max_td)
}
# log likelihoods (only used for calculation of ICs)
# Gaussian log-likelihood of the conditional transformation model.
# param: coefficient vector; entries indexed by xx$exp_ident (0-based,
#   hence the +1) are stored on the log scale and exponentiated before use.
# xx: list with design matrix X and derivative design matrix Xp.
# Returns sum of standard-normal log-densities of X %*% bt plus the
# log-Jacobian term sum(log(Xp %*% bt)).
ll_gauss <- function(param, xx){
  bt <- param
  # xx$exp_ident holds 0-based positions of log-scale coefficients
  exp_ident <- xx$exp_ident + 1
  bt[exp_ident] <- exp(bt[exp_ident])
  # TRUE instead of T: T is an ordinary (reassignable) variable in R
  sum(dnorm(xx$X %*% bt, log = TRUE)) + sum(log(xx$Xp %*% bt))
}
# Logistic log-likelihood of the conditional transformation model.
# Same structure as ll_gauss but with the standard logistic density.
# param: coefficient vector (log-scale entries given by xx$exp_ident, 0-based).
# xx: list with design matrix X and derivative design matrix Xp.
ll_logit <- function(param, xx){
  bt <- param
  # xx$exp_ident holds 0-based positions of log-scale coefficients
  exp_ident <- xx$exp_ident + 1
  bt[exp_ident] <- exp(bt[exp_ident])
  # TRUE instead of T: T is an ordinary (reassignable) variable in R
  sum(dlogis(xx$X %*% bt, log = TRUE)) + sum(log(xx$Xp %*% bt))
}
# Minimum-extreme-value log-likelihood of the conditional transformation
# model, using the bctm_dmev density defined below.
# param: coefficient vector (log-scale entries given by xx$exp_ident, 0-based).
# xx: list with design matrix X and derivative design matrix Xp.
ll_mev <- function(param, xx){
  bt <- param
  # xx$exp_ident holds 0-based positions of log-scale coefficients
  exp_ident <- xx$exp_ident + 1
  bt[exp_ident] <- exp(bt[exp_ident])
  # TRUE instead of T: T is an ordinary (reassignable) variable in R
  sum(bctm_dmev(xx$X %*% bt, log = TRUE)) + sum(log(xx$Xp %*% bt))
}
# minimum-extreme-value density
# Density of the minimum extreme value (Gumbel-for-minima) distribution:
# f(x) = exp(x - exp(x)).
# x: numeric vector of quantiles.
# log: if TRUE, return the log-density (FALSE spelled out; F is reassignable).
bctm_dmev <- function(x, log = FALSE) {
  log_density <- x - exp(x)
  if (!log) return(exp(log_density))
  log_density
}
# minimum-extreme-value cdf
# Distribution function of the minimum extreme value distribution:
# F(x) = 1 - exp(-exp(x)).
bctm_pmev <- function(x) {
  1 - exp(-exp(x))
}
calculate_hamiltonian <- function(beta, xx, r, f) f(beta, xx)- 0.5*sum(r^2)
# No-U-Turn criterion: the trajectory may keep expanding only while the
# momenta at both ends point away from each other along the displacement
# beta_plus - beta_minus (both projections non-negative).
# Returns a length-1 logical. Uses <- (not =) for top-level assignment.
check_nuts <- function(beta_plus, beta_minus, r_plus, r_minus){
  d <- beta_plus - beta_minus
  (crossprod(d, r_minus) >= 0) && (crossprod(d, r_plus) >= 0)
}
# Rescale the target by a diagonal mass matrix so that the kinetic energy
# becomes standard Gaussian.
# f, gr, ll: log-posterior, gradient and log-likelihood on the original scale.
# xx: data object passed through to the wrapped functions.
# M: vector of diagonal mass-matrix entries (variances).
# beta_y: current parameter value on the original scale.
# Returns wrapped functions (f2, gr2, ll2), the rescaled current value
# beta_cur, and the scale factors M_sq.
rotate_rescale <- function(f, gr, ll, xx, M, beta_y){
  if (!is.vector(M)) {
    stop("only diagonal mass matrices (with positive diagonal entries) allowed")
  }
  M_sq <- sqrt(M)
  # Wrappers receive rescaled parameters and map them back before evaluation.
  f2 <- function(beta, xx) f(M_sq * beta, xx)
  ll2 <- function(beta, xx) ll(M_sq * beta, xx)
  # Chain rule: gradient on the rescaled scale picks up a factor M_sq.
  gr2 <- function(beta, xx) as.vector(gr(M_sq * beta, xx) ) * M_sq
  # Map the current parameter value onto the rescaled scale.
  beta_cur <- (1/M_sq) * beta_y
  list(f2 = f2, gr2 = gr2, ll2 = ll2, beta_cur = beta_cur, M_sq = M_sq)
}
# this is the FindReasonableEpsilon() in Hoffman and Gelman (2014)
# Heuristically choose an initial leapfrog step size: starting from 1, the
# step size is doubled (a = 1) or halved (a = -1) until the acceptance
# ratio of a single leapfrog step crosses 1/2.
# beta: current parameter vector; f, gr: log-posterior and gradient; xx: data.
# Returns the step size invisibly; draws a fresh momentum, so the result
# is random.
find_reasonable_epsilon <- function(beta, f, gr, xx){
  # initialize
  step_size <- 1
  r <- rnorm(length(beta))
  ## one trial leapfrog step
  lf <- leapfrog_step(beta, r, step_size, gr, xx)
  beta_prime <- lf$beta_prime
  r_prime <- lf$r_prime
  H <- calculate_hamiltonian(beta = beta, xx = xx, r = r, f = f)
  H_prime <- calculate_hamiltonian(beta = beta_prime, xx = xx, r = r_prime, f = f)
  # a decides the scaling direction based on whether the acceptance
  # probability exceeds 1/2
  a <- 2*(exp(H_prime)/exp(H) > 0.5) - 1
  if (!is.finite(a)) a <- -1
  k <- 1
  # scalar condition, so use short-circuiting || (was elementwise |);
  # this also skips the energy comparison when either Hamiltonian is NaN/Inf
  while (!is.finite(H) || !is.finite(H_prime) || a*H_prime - a*H > -a*log(2)) {
    step_size <- (2^a)*step_size
    lf <- leapfrog_step(beta, r, step_size, gr, xx)
    beta_prime <- lf$beta_prime
    r_prime <- lf$r_prime
    H_prime <- calculate_hamiltonian(beta = beta_prime, xx = xx, r = r_prime, f = f)
    k <- k + 1
    if (k > 500) stop("Could not find reasonable epsilon in 500 iterations")
  }
  # if(verbose) message(paste("Reasonable epsilon=", step_size, "found after", k, "steps"))
  return(invisible(step_size))
}
# generic leapfrog update
# One leapfrog (Stormer-Verlet) integration step: half momentum update,
# full position update, half momentum update. Callers pass v*step_size,
# which may be negative to integrate backwards in time.
# Uses <- (not =) for the top-level function assignment.
leapfrog_step <- function(beta, r, step_size, gr, xx){
  # half step for the momentum using the gradient at the current position
  r_half <- r + 0.5 * step_size * gr(beta, xx)
  # full step for the position
  beta_prime <- beta + step_size * r_half
  # second momentum half step using the gradient at the new position
  r_prime <- r_half + 0.5 * step_size * gr(beta_prime, xx)
  list(beta_prime = beta_prime, r_prime = r_prime)
}
# Recursive tree-doubling step of the No-U-Turn Sampler (a variant of
# Algorithm 6 in Hoffman & Gelman, 2014, extended with the dual-averaging
# statistics alpha/n_alpha).
# beta, r: position/momentum at the starting leaf of this subtree.
# log_u: log of the slice variable; v: integration direction (+1 or -1);
# j: subtree depth (j == 0 performs a single leapfrog step);
# H_prime: Hamiltonian at the trajectory start (reference for acceptance).
# f, gr: log-posterior and gradient; delta_max: divergence threshold;
# info: environment used to record divergences across recursive calls.
# Returns leftmost/rightmost states, a proposal beta_prime, the continue
# flag s, the count n of slice-admissible leaves, and alpha/n_alpha.
build_tree <- function(beta, xx, r, log_u, v, j, step_size, H_prime, f, gr,
                       delta_max=1000, info = environment() ){
# print(diag(S))
if(j==0){
# base case: take one leapfrog step in direction v
lf <- leapfrog_step(beta, r, v*step_size, gr, xx)
beta <- lf$beta_prime
r <- lf$r_prime
H <- calculate_hamiltonian(beta=beta, xx=xx, r=r, f=f)
# n: logical used as 0/1 count of whether this leaf lies inside the slice
n <- log_u <= H
# s == 0 flags a divergence (energy error beyond delta_max)
s <- log_u < delta_max + H
if(!is.finite(H) | s == 0){
# record the divergence in the shared environment so the outer loop sees it
info$div <- 1
s <- 0
}
# acceptance statistic relative to the trajectory's starting Hamiltonian
log_alpha <- H-H_prime
alpha <- min(exp(log_alpha), 1)
return(list(beta_minus=beta, beta_plus=beta, beta_prime=beta, r_minus=r,
r_plus=r, s=s, n=n, alpha=alpha, n_alpha=1))
} else {
## recursion - build left and right branches
branch1 <- build_tree(beta=beta, xx=xx, r=r, log_u=log_u, v=v, j=j-1, step_size=step_size,
H_prime=H_prime, f=f, gr=gr, info=info)
beta_minus <- branch1$beta_minus
beta_plus <- branch1$beta_plus
beta_prime <- branch1$beta_prime
r_minus <- branch1$r_minus
r_plus <- branch1$r_plus
alpha <- branch1$alpha
n_alpha <- branch1$n_alpha
s <- branch1$s
if(!is.finite(s)) s <- 0
nprime <- branch1$n
# only build the second half if the first half neither stopped nor diverged
if(s==1){
if(v== -1){
branch2 <- build_tree(beta = beta_minus, xx = xx, r = r_minus, log_u = log_u, v=v, j=j-1, step_size=step_size, H_prime = H_prime, f=f, gr=gr, info=info)
beta_minus <- branch2$beta_minus
r_minus <- branch2$r_minus
} else {
branch2 <- build_tree(beta=beta_plus, xx=xx, r =r_plus, log_u = log_u, v=v, j=j-1, step_size=step_size, H_prime = H_prime, f=f, gr=gr, info=info)
beta_plus <- branch2$beta_plus
r_plus <- branch2$r_plus
}
nprime <- branch2$n+ branch1$n
if(!is.finite(nprime)) nprime <- 0
## acceptance step
# choose between the two subtrees' proposals with probability proportional
# to their number of admissible leaves.
# NOTE(review): alpha/n_alpha are accumulated only when branch2's proposal
# wins, whereas Hoffman & Gelman sum them unconditionally — confirm this
# deviation is intended before changing it.
if(nprime>0){
if(runif(1) <= branch2$n/nprime){
beta_prime <- branch2$beta_prime
alpha <- branch1$alpha+branch2$alpha
n_alpha <- branch1$n_alpha+branch2$n_alpha
}
}
# check if proposal is valid
# stop expanding once the trajectory starts turning back on itself
b <- check_nuts(beta_plus = beta_plus, beta_minus=beta_minus,
r_plus = r_plus, r_minus = r_minus)
s <- branch2$s*b
}
return(list(beta_minus =beta_minus, beta_plus = beta_plus, beta_prime=beta_prime,
r_minus = r_minus, r_plus = r_plus,
s = s, n = nprime,
alpha = alpha, n_alpha=n_alpha))
}
}
# ----------------------------------------------------------------------------
# NOTE(review): a stray "|" (file-concatenation artifact) was here; a lone
# "|" is not valid R at top level and would break parsing when sourced.
# ----------------------------------------------------------------------------
#Greg Kelly
#libraries used in this script
library(tidyverse)
library(readxl)
library(readr)
library(lubridate)
library(rio)
library(dplyr)
library(plyr)
#change the working directory setwd('..') brings you out of that folder, up one directory level
#getwd()
#setwd("DublinAQData/")
#read in all files from a directory
#files <- list.files()
# ASHTOWNGROVE DAILY ----
# Reads the 1996/1997 Ashtown Grove PM10 text files (whitespace-delimited),
# stacks them, strips repeated header rows, parses the date column and writes
# one daily CSV into Gathered_Data/. Relies on the working directory set by
# earlier parts of the script.
setwd("AshtownGrove/")
#read in the text files
Dublin_AshtownGrove_PM10_1996 <- read_table2("Dublin_AshtownGrove_PM10_1996.txt")
Dublin_AshtownGrove_PM10_1997 <- read_table2("Dublin_AshtownGrove_PM10_1997.txt")
#bind the 2 datasets
PM10 = rbind(Dublin_AshtownGrove_PM10_1996, Dublin_AshtownGrove_PM10_1997)
#remove potential headers after binding the data
# NOTE(review): if grep() matches nothing it returns integer(0) and
# x[-integer(0), ] drops ALL rows — this pattern assumes header rows are
# always present in the bound data.
PM10 = PM10[- grep("PM10", PM10$`PM10(ug/m3)` ),]
#change the date column so R recognises it as a date column
library(lubridate)
#check if R sees it as a date
#sapply(PM10$Date, is.Date)
#set date column as date
PM10$Date <- as.Date(PM10$Date, format="%d/%m/%Y")
#quick look at the plot
#plot(PM10)
#save the output in a created Folder hourly data in main project directory
write_csv(PM10, "../Gathered_Data/Dublin_AshtownGrove_PM10_ugm3_daily.csv")
#clean the enviroment
rm(list=ls())
#BALBRIGGAN DAILY--------------------------------------------------------------------------------
# Reads the daily Balbriggan xlsx files (PM10, benzene, toluene), stacks each
# pollutant, removes repeated header rows, outer-joins them on Date and writes
# one daily CSV into Gathered_Data/.
#change directory to balbriggan, change into the daily folder
setwd('..')
setwd("Balbriggan/")
setwd("daily/")
#search the directory and join by the type of pollutants
# NOTE(review): dir(pattern=) takes a regex, not a glob — the leading "*"
# is not a wildcard here; these patterns happen to work as substring matches.
benzene_files = dir(pattern = "*Benzene")
PM10_files = dir(pattern = "*PM10")
toluene_files = dir(pattern = "*Toluene")
#bind the PM10 files chronologically
PM10_list = lapply(PM10_files, read_xlsx)
Balbriggan_PM10 = do.call(rbind, PM10_list)
#remove rows with headings rather than values in them after the row bind
# NOTE(review): x[- grep(...), ] drops ALL rows if no header rows match
Balbriggan_PM10 = Balbriggan_PM10[- grep("ug/m3", Balbriggan_PM10$PM10),]
#check the plot
#plot(Balbriggan_PM10, type = "o")
#balbriggan benzene
benzene_list = lapply(benzene_files, read_xlsx)
Balbriggan_Benzene = do.call(rbind, benzene_list)
#balbriggan toluene
toluene_list = lapply(toluene_files , read_xlsx)
Balbriggan_Toluene = do.call(rbind, toluene_list)
#merge the data
Balbriggan_daily = merge(Balbriggan_PM10, Balbriggan_Benzene, by = "Date", all = TRUE)
Balbriggan_daily = merge(Balbriggan_daily, Balbriggan_Toluene, by = "Date", all = TRUE)
#remove rows of data that could be headers
Balbriggan_daily = Balbriggan_daily[- grep("ug/m3", Balbriggan_daily$Benzene ),]
#remove hours from data
Balbriggan_daily$Date = as.Date(Balbriggan_daily$Date,format='%Y-%m-%d %H')
#save file to clean data folder
write_csv(Balbriggan_daily, "../../Gathered_Data/Dublin_Balbriggan_Benzene_Toluene_PM10_daily.csv")
#clean the enviroment
rm(list=ls())
#BALBRIGGAN HOURLY------------------------------------------
# Reads hourly Balbriggan xlsx files (CO, NOx, SO2; benzene/toluene are read
# but not written out), combines Date+Time into POSIXct, writes the merged
# hourly CSV and a daily mean/min/max summary CSV to Gathered_Data/.
setwd("../../Balbriggan/")
setwd("hourly/")
#search the directory and join by the type of pollutants
benzene_files = dir(pattern = "*Benzene")
CO_files = dir(pattern = "*CO")
NOx_files = dir(pattern = "*NOx")
SO2_files = dir(pattern = "*SO2")
toluene_files = dir(pattern = "*Toluene")
#bind benzene files together
# NOTE(review): Balbriggan_Benzene and toluene_files are never used below —
# the benzene/toluene hourly data is not included in either output CSV.
benzene_list = lapply(benzene_files, read_xlsx)
Balbriggan_Benzene = do.call(rbind, benzene_list)
#bind the CO files chronologically
CO_list = lapply(CO_files, read_xlsx)
Balbriggan_CO = do.call(rbind, CO_list)
#balbriggan NOx
NOx_list = lapply(NOx_files , read_xlsx)
Balbriggan_NOx = do.call(rbind, NOx_list)
#balbriggan SO2
SO2_list = lapply(SO2_files , read_xlsx)
Balbriggan_SO2 = do.call(rbind, SO2_list)
#remove rows with headings rather than values in them after the row bind
Balbriggan_CO = Balbriggan_CO[- grep("mg/m3", Balbriggan_CO$CO),]
Balbriggan_NOx = Balbriggan_NOx[- grep("ug/m3", Balbriggan_NOx$NOx),]
Balbriggan_SO2 = Balbriggan_SO2[- grep("ug/m3", Balbriggan_SO2$SO2),]
#Combine the date and the time for the different pollutants to help with graphing
Balbriggan_CO$Date <- with(Balbriggan_CO, as.POSIXct(paste(Balbriggan_CO$Date, Balbriggan_CO$Time), format="%Y-%m-%d %H"))
Balbriggan_CO$Time = NULL
Balbriggan_NOx$Date <- with(Balbriggan_NOx, as.POSIXct(paste(Balbriggan_NOx$Date, Balbriggan_NOx$Time), format="%Y-%m-%d %H"))
Balbriggan_NOx$Time = NULL
Balbriggan_SO2$Date <- with(Balbriggan_SO2, as.POSIXct(paste(Balbriggan_SO2$Date, Balbriggan_SO2$Time), format="%Y-%m-%d %H"))
Balbriggan_SO2$Time = NULL
#check the plot
#plot(Balbriggan_CO)
#calculate min, max and mean for hourly data
# NOTE(review): the names mean/min/max shadow the base functions until the
# rm(list=ls()) at the end of this section.
#CO
Balbriggan_CO$CO = as.numeric(Balbriggan_CO$CO)
mean = aggregate(Balbriggan_CO[names(Balbriggan_CO)!='Date'], list(hour=cut(Balbriggan_CO$Date,'day')), mean, na.rm=F)
colnames(mean)[2] = "CO_Mean"
min = aggregate(Balbriggan_CO[names(Balbriggan_CO)!='Date'], list(hour=cut(Balbriggan_CO$Date,'day')), min, na.rm=F)
colnames(min)[2] = "CO_Min"
max = aggregate(Balbriggan_CO[names(Balbriggan_CO)!='Date'], list(hour=cut(Balbriggan_CO$Date,'day')), max, na.rm=F)
colnames(max)[2] = "CO_Max"
Balbriggan_CO_NOx_SO2_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(mean,min,max))
#NOx
Balbriggan_NOx$NOx = as.numeric(Balbriggan_NOx$NOx)
Balbriggan_NOx$NO = as.numeric(Balbriggan_NOx$NO)
Balbriggan_NOx$NO2 = as.numeric(Balbriggan_NOx$NO2)
mean = aggregate(Balbriggan_NOx[names(Balbriggan_NOx)!='Date'], list(hour=cut(Balbriggan_NOx$Date,'day')), mean, na.rm=F)
colnames(mean)[2] = "NOx_Mean"
colnames(mean)[3] = "NO_Mean"
colnames(mean)[4] = "NO2_Mean"
min = aggregate(Balbriggan_NOx[names(Balbriggan_NOx)!='Date'], list(hour=cut(Balbriggan_NOx$Date,'day')), min, na.rm=F)
colnames(min)[2] = "NOx_Min"
colnames(min)[3] = "NO_Min"
colnames(min)[4] = "NO2_Min"
max = aggregate(Balbriggan_NOx[names(Balbriggan_NOx)!='Date'], list(hour=cut(Balbriggan_NOx$Date,'day')), max, na.rm=F)
colnames(max)[2] = "NOx_Max"
colnames(max)[3] = "NO_Max"
colnames(max)[4] = "NO2_Max"
#merge the data with the CO data
Balbriggan_CO_NOx_SO2_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(Balbriggan_CO_NOx_SO2_hr_MMM_daily,mean,min,max))
#SO2
Balbriggan_SO2$SO2 = as.numeric(Balbriggan_SO2$SO2)
mean = aggregate(Balbriggan_SO2[names(Balbriggan_SO2)!='Date'], list(hour=cut(Balbriggan_SO2$Date,'day')), mean, na.rm=F)
colnames(mean)[2] = "SO2_Mean"
min = aggregate(Balbriggan_SO2[names(Balbriggan_SO2)!='Date'], list(hour=cut(Balbriggan_SO2$Date,'day')), min, na.rm=F)
colnames(min)[2] = "SO2_Min"
max = aggregate(Balbriggan_SO2[names(Balbriggan_SO2)!='Date'], list(hour=cut(Balbriggan_SO2$Date,'day')), max, na.rm=F)
colnames(max)[2] = "SO2_Max"
#merge the data with existing data
Balbriggan_CO_NOx_SO2_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(Balbriggan_CO_NOx_SO2_hr_MMM_daily,mean,min,max))
#change column name to date
colnames(Balbriggan_CO_NOx_SO2_hr_MMM_daily)[1] = "Date"
#remove hours from data
Balbriggan_CO_NOx_SO2_hr_MMM_daily$Date = as.Date(Balbriggan_CO_NOx_SO2_hr_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted hourly data
write_csv(Balbriggan_CO_NOx_SO2_hr_MMM_daily, "../../Gathered_Data/Dublin_Balbriggan_CO_NOx_SO2_hr_MMM_daily.csv")
#merge the hourly data
Balbriggan_hourly = merge(Balbriggan_CO, Balbriggan_NOx, by = "Date", all = TRUE)
Balbriggan_hourly = merge(Balbriggan_hourly, Balbriggan_SO2, by = "Date", all = TRUE)
#write out as a csv file into the directory
write_csv(Balbriggan_hourly, "../../Gathered_Data/Dublin_Balbriggan_CO_NOx_SO2_hr.csv")
#clean the enviroment
rm(list=ls())
#BALLYFERMOT DAILY--------------------------------------------------------------------------------
# Reads the daily Ballyfermot PM10 xlsx files, stacks them, strips repeated
# header rows, truncates Date to day precision and writes one daily CSV.
setwd("../../Ballyfermot/")
setwd("daily/")
#for the daily files I manually opened them in excel and changed them from xlx or txt files to xlxs files
PM10_files = dir(pattern = "*PM10")
#bind the PM10 files chronologically
PM10_list = lapply(PM10_files, read_xlsx)
PM10 = do.call(rbind, PM10_list)
#remove any rows with ug/m3 in them
# NOTE(review): x[- grep(...), ] drops ALL rows if no header rows match
PM10 = PM10[- grep("ug/m3", PM10$PM10),]
#plot(PM10)
#remove hours from data
PM10$Date = as.Date(PM10$Date,format='%Y-%m-%d %H')
#save the data
write_csv(PM10, "../../Gathered_Data/Dublin_Ballyfermot_PM10_ugm3_daily.csv")
#clean the enviroment
rm(list=ls())
#BALLYFERMOT HOURLY--------------------------------------------------------------------------------
# BUG FIX (review): the previous section leaves the working directory at
# Ballyfermot/daily, so the original setwd("Ballyfermot/") (which assumed the
# project root) would fail when the script is run top to bottom. Navigate
# relative to the current directory, matching the other sections' style.
setwd("../../Ballyfermot/")
setwd("hourly/")
# Reads hourly Ballyfermot CSVs: converts ppb-unit NOx and SO2 files to
# ug/m3 (ppb * molecular_weight / 22.41), binds them with the ug/m3 files,
# merges NOx+SO2 into one hourly CSV, then computes daily mean/min/max
# summaries for NOx.
#using the library rio to convert txt files types to .csv file types
#already converted commenting out code for testing
# library(rio)
# txt <- dir(pattern = "txt")
# created <- mapply(convert, txt, gsub("txt", "csv", txt))
# unlink(txt) # delete txt files
#
# #change xlsx files to csv
# xlsx <- dir(pattern = "xlsx")
# created <- mapply(convert, xlsx, gsub("xlsx", "csv", xlsx))
# unlink(xlsx)
#
# #change xls files to csv files
# xls <- dir(pattern = "xls")
# created <- mapply(convert, xls, gsub("xls", "csv", xls))
# unlink(xlsx)
# PM10_files
#search for ppb NOX files using logical OR statement
NOx_ppb_files = dir(pattern = "NOx.*ppb|ppb.*NOx")
#NOx ppb files conversion
NOx_ppb_list = lapply(NOx_ppb_files, read_csv)
NOx_ppb = do.call(rbind, NOx_ppb_list)
#clear rows with ppb written in them
NOx_ppb = NOx_ppb[- grep("ppb", NOx_ppb$NO2),]
#convert all the NO columns from strings to numerical values for calculations
# assumes columns 3:5 are NO/NO2/NOx — depends on the input file layout
NOx_ppb[, 3:5] <- sapply(NOx_ppb[, 3:5], as.numeric)
#is.numeric(NOx_ppb$NO2)
#convert NO from ppb to ugm3. molecular weight is 30. formula is ppb x moleucular weight/22.41
NOx_ppb$NO = NOx_ppb$NO * (30/22.41)
#convert NO2 from ppb to ugm3. molecular weight is 46. formula is ppb x moleucular weight/22.41
NOx_ppb$NO2 = NOx_ppb$NO2 * (46/22.41)
#convert NOX from ppb to ugm3. molecular weight is 46. formula is ppb x moleucular weight/22.41
NOx_ppb$NOx = NOx_ppb$NOx * (46/22.41)
#search for ugm3 NOX files using logical OR statement
NOx_files = dir(pattern = "NOx.*ugm3|ugm3.*NOx")
NOx_list = lapply(NOx_files, read_csv)
NOx = do.call(rbind, NOx_list)
#clear header rows (they contain "ug/m3" instead of values)
NOx = NOx[- grep("ug/m3", NOx$NO2),]
#combine both type of files into one
NOx = rbind(NOx,NOx_ppb)
#SO2
#remove columns created from txt conversion to csv. rename the column to be the same as the ugm3 files. save the file replacing existing one
#completed, commented out for testing
# Dublin_Ballyfermot_SO2_2006_ugm3_hr <- read_csv("Dublin_Ballyfermot_SO2_2006_ugm3_hr.csv")
# Dublin_Ballyfermot_SO2_2006_ugm3_hr = Dublin_Ballyfermot_SO2_2006_ugm3_hr[,-c(4:6)]
# names(Dublin_Ballyfermot_SO2_2006_ugm3_hr)[3]<-"SO2"
#
# #Combine the date and the time columns
# Dublin_Ballyfermot_SO2_2006_ugm3_hr$Date = as.Date(Dublin_Ballyfermot_SO2_2006_ugm3_hr$Date, format = "%d/%m/%Y" )
# Dublin_Ballyfermot_SO2_2006_ugm3_hr$Date <- with(Dublin_Ballyfermot_SO2_2006_ugm3_hr, as.POSIXct(paste(Dublin_Ballyfermot_SO2_2006_ugm3_hr$Date, Dublin_Ballyfermot_SO2_2006_ugm3_hr$Time), format="%Y-%m-%d %H"))
#
# #Dublin_Ballyfermot_SO2_2006_ugm3_hr$Date = as.Date(Dublin_Ballyfermot_SO2_2006_ugm3_hr$Date,format='%Y-%m-%d %H')
# write_csv(Dublin_Ballyfermot_SO2_2006_ugm3_hr, "Dublin_Ballyfermot_SO2_2006_ugm3_hr.csv")
#search for ugm3 SO2 files using logical OR statement
SO2_files = dir(pattern = "SO2.*ugm3|ugm3.*SO2")
SO2_list = lapply(SO2_files, read_csv)
#rename columns for binding
SO2_list <- lapply(SO2_list, function(x) setNames(x, gsub("^Hour$", "Time", names(x))) )
#bind the SO2 ugm3 data
SO2 = do.call(rbind, SO2_list)
#search for ppb SO2 files using logical OR statement
SO2_ppb_files = dir(pattern = "SO2.*ppb|ppb.*SO2")
SO2_ppb_list = lapply(SO2_ppb_files, read_csv)
SO2_ppb = do.call(rbind, SO2_ppb_list)
#convert SO2 ppb data to ugm3 data for consistency
SO2_ppb$SO2 = as.numeric(SO2_ppb$SO2)
SO2_ppb$SO2 = SO2_ppb$SO2 * (64/22.41)
#bind SO2 data
SO2 = rbind(SO2, SO2_ppb)
#clean possible headings
#SO2 = SO2[- grep("ug/m3", SO2$SO2),]
#SO2 = SO2[- grep("ppb", SO2$SO2),]
#merge the SO2 and NOx data files for Ballyfermot
Ballyfermot_NOx_SO2_hr = merge(NOx,SO2)
#sort time
Ballyfermot_NOx_SO2_hr$Date <- with(Ballyfermot_NOx_SO2_hr, as.POSIXct(paste(Ballyfermot_NOx_SO2_hr$Date, Ballyfermot_NOx_SO2_hr$Time), format="%Y-%m-%d %H"))
#Combine the date and the time for the different pollutants to help with graphing
# NOTE(review): this repeats the conversion on the previous line — the
# second as.POSIXct re-parses an already-converted Date column; it appears
# redundant. Verify before removing.
Ballyfermot_NOx_SO2_hr$Date <- with(Ballyfermot_NOx_SO2_hr, as.POSIXct(paste(Ballyfermot_NOx_SO2_hr$Date, Ballyfermot_NOx_SO2_hr$Time), format="%Y-%m-%d %H"))
Ballyfermot_NOx_SO2_hr$Time = NULL
#save the new ugm3 files in the same directory and delete the old files
write_csv(Ballyfermot_NOx_SO2_hr, "../../Gathered_Data/Dublin_Ballyfermot_NOx_SO2_hr.csv")
#calculate min, max and mean for hourly data
#NOx
NOx$NOx = as.numeric(NOx$NOx)
NOx$NO = as.numeric(NOx$NO)
NOx$NO2 = as.numeric(NOx$NO2)
#atomic vector error workaround
NOx_ave = NOx
#Combine the date and the time
NOx_ave$Date <- with(NOx_ave, as.POSIXct(paste(NOx_ave$Date, NOx_ave$Time), format="%Y-%m-%d %H"))
NOx_ave$Time = NULL
NOx = NOx_ave
mean = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), mean, na.rm=F)
names(mean) <- gsub("NOx", "NOx_Mean", names(mean))
names(mean) <- gsub("NO$", "NO_Mean", names(mean))
names(mean) <- gsub("NO2", "NO2_Mean", names(mean))
min = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), min, na.rm=F)
names(min) <- gsub("NOx", "NOx_Min", names(min))
names(min) <- gsub("NO$", "NO_Min", names(min))
names(min) <- gsub("NO2", "NO2_Min", names(min))
max = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), max, na.rm=F)
names(max) <- gsub("NOx", "NOx_Max", names(max))
names(max) <- gsub("NO$", "NO_Max", names(max))
names(max) <- gsub("NO2", "NO2_Max", names(max))
#merge the NOx mean/min/max summaries
Ballyfermot_NOx_SO2_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(mean,min,max))
#SO2
# BUG FIX (review): this block originally aggregated `Balbriggan_SO2`, a
# leftover name from the Balbriggan section that no longer exists after the
# rm(list=ls()) calls above — the script would stop with "object not found".
# Use this section's Ballyfermot `SO2` data instead; the Date/Time handling
# mirrors the NOx treatment earlier in this section.
SO2$SO2 = as.numeric(SO2$SO2)
#Combine the date and the time, then drop the Time column (as done for NOx)
SO2$Date <- with(SO2, as.POSIXct(paste(SO2$Date, SO2$Time), format="%Y-%m-%d %H"))
SO2$Time = NULL
mean = aggregate(SO2[names(SO2)!='Date'], list(hour=cut(SO2$Date,'day')), mean, na.rm=F)
colnames(mean)[2] = "SO2_Mean"
min = aggregate(SO2[names(SO2)!='Date'], list(hour=cut(SO2$Date,'day')), min, na.rm=F)
colnames(min)[2] = "SO2_Min"
max = aggregate(SO2[names(SO2)!='Date'], list(hour=cut(SO2$Date,'day')), max, na.rm=F)
colnames(max)[2] = "SO2_Max"
#merge the SO2 data with existing data
Ballyfermot_NOx_SO2_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(Ballyfermot_NOx_SO2_hr_MMM_daily,mean,min,max))
#change column name to date
colnames(Ballyfermot_NOx_SO2_hr_MMM_daily)[1] = "Date"
#remove hours from data
Ballyfermot_NOx_SO2_hr_MMM_daily$Date = as.Date(Ballyfermot_NOx_SO2_hr_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted hourly data
write_csv(Ballyfermot_NOx_SO2_hr_MMM_daily, "../../Gathered_Data/Dublin_Ballyfermot_NOx_SO2_hr_MMM_daily.csv")
#clean the enviroment
rm(list=ls())
#BLANCHARDSTOWN DAILY--------------------------------------------------------------------------------
# Reads the daily Blanchardstown PM10 CSVs, stacks them, strips repeated
# header rows and writes one daily CSV. Note: this section does not call
# rm(list=ls()) at the end, so PM10 remains in the environment.
setwd('..')
setwd('..')
setwd("Blanchardstown/daily")
#convert xls files to csv files and delete the xls files. convert all of the data types to csv to help import files better
#csv was chosen as R seems to like it and could not find a way to convert an xls file to xlxs
# xls <- dir(pattern = "(.*)xls$")
# created <- mapply(convert, xls, gsub("xls", "csv", xls))
# unlink(xls)
#
# #convert xlsx files to csv files and delete the xlsx files
# xlsx <- dir(pattern = "(.*)xlsx$")
# created <- mapply(convert, xlsx, gsub("xlsx", "csv", xlsx))
# unlink(xlsx)
#search for bind the PM10 files chronologically
PM10_files = dir(pattern = "*PM10")
PM10_list = lapply(PM10_files, read_csv)
PM10 = do.call(rbind, PM10_list)
#plot(PM10)
#clear rows with ug/m3 written in them
PM10 = PM10[- grep("ug/m3", PM10$PM10),]
#remove hours from data
PM10$Date = as.Date(PM10$Date,format='%Y-%m-%d %H')
#save the daily data for Blanchardstown
write_csv(PM10, "../../Gathered_Data/Dublin_Blanchardstown_PM10_daily.csv")
#BLANCHARDSTOWN HOURLY--------------------------------------------------------------------------------
# Reads the ppb-unit hourly NOx CSVs and converts them to ug/m3
# (ppb * molecular_weight / 22.41), then deletes the source files.
setwd("../hourly")
#NOx ppb files conversion
ppb_NOx = dir(pattern = "ppb")
NOx_ppb_list = lapply(ppb_NOx, read_csv)
NOx_ppb = do.call(rbind, NOx_ppb_list)
#clear rows with ppb written in them
NOx_ppb = NOx_ppb[- grep("ppb", NOx_ppb$NO2),]
#convert all the NO columns from strings to numerical values for calculations
# assumes columns 3:5 are the NO/NO2/NOx value columns
NOx_ppb[, 3:5] <- sapply(NOx_ppb[, 3:5], as.numeric)
is.numeric(NOx_ppb$NO2)
#convert NOX from ppb to ugm3. molecular weight is 46 formula is ppb x moleucular weight/22.41
NOx_ppb$NOx = NOx_ppb$NOx * (46/22.41)
#convert NO from ppb to ugm3. molecular weight is 30. formula is ppb x moleucular weight/22.41
NOx_ppb$NO = NOx_ppb$NO * (30/22.41)
#convert NO2 from ppb to ugm3. molecular weight is 46. formula is ppb x moleucular weight/22.41
NOx_ppb$NO2 = NOx_ppb$NO2 * (46/22.41)
#remove the ppb files
# WARNING(review): unlink() permanently deletes the source data files from
# disk so the dir("*NOx") scan below does not re-read them — the script is
# destructive and cannot be re-run without restoring the raw files.
unlink(ppb_NOx)
#bind the NOx files chronologically
NOx_files = dir(pattern = "*NOx")
NOx_list = lapply(NOx_files, read_csv)
NOx = do.call(rbind, NOx_list)
#clean the data of headings
NOx = NOx[- grep("ug/m3", NOx$NOx),]
#combine the converted ppb data with the ugm3 data
# BUG FIX (review): NOx_ppb was converted to ug/m3 above but never appended,
# so the ppb-era measurements were silently dropped from the Blanchardstown
# output (and their source files deleted). The parallel Ballyfermot section
# performs this rbind; mirrored here.
NOx = rbind(NOx, NOx_ppb)
# Combines Date+Time into POSIXct, writes the hourly NOx CSV, then computes
# daily mean/min/max summaries and writes them as a second CSV.
#atomic vector error so putting it into a new dataframe
Blanchardstown_NOx_hr = NOx
#sort time
Blanchardstown_NOx_hr$Date <- with(Blanchardstown_NOx_hr, as.POSIXct(paste(Blanchardstown_NOx_hr$Date, Blanchardstown_NOx_hr$Time), format="%Y-%m-%d %H"))
Blanchardstown_NOx_hr$Time = NULL
#save the output
# NOTE(review): write.csv (unlike write_csv used elsewhere) also writes a
# row-names column — confirm downstream readers expect that.
write.csv(Blanchardstown_NOx_hr, file = "../../Gathered_Data/Dublin_Blanchardstown_NOx_hr.csv")
#determine mean,max, min for the hourly dataset
NOx = Blanchardstown_NOx_hr
#convert all the NO columns from strings to numerical values for calculations
NOx[, 2:4] <- sapply(NOx[, 2:4], as.numeric)
mean = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), mean, na.rm=F)
names(mean) <- gsub("NOx", "NOx_Mean", names(mean))
names(mean) <- gsub("NO$", "NO_Mean", names(mean))
names(mean) <- gsub("NO2", "NO2_Mean", names(mean))
min = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), min, na.rm=F)
names(min) <- gsub("NOx", "NOx_Min", names(min))
names(min) <- gsub("NO$", "NO_Min", names(min))
names(min) <- gsub("NO2", "NO2_Min", names(min))
max = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), max, na.rm=F)
names(max) <- gsub("NOx", "NOx_Max", names(max))
names(max) <- gsub("NO$", "NO_Max", names(max))
names(max) <- gsub("NO2", "NO2_Max", names(max))
#merge the data
Blanchardstown_NOx_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(mean,min,max))
#change column name to date
colnames(Blanchardstown_NOx_MMM_daily)[1] = "Date"
#remove hours from data
Blanchardstown_NOx_MMM_daily$Date = as.Date(Blanchardstown_NOx_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted hourly data
write_csv(Blanchardstown_NOx_MMM_daily, "../../Gathered_Data/Dublin_Blanchardstown_NOx_MMM_daily.csv")
#clean the enviroment
rm(list=ls())
#CITY COUNCIL --------------------------------------------------------------------------------
# Reads the historic City Council smoke and SO2-bubbler xls files, converts
# numeric Excel dates, merges street columns duplicated by header-name
# changes across years, suffixes column names with _smoke/_SO2 and writes
# one merged daily CSV.
setwd('..')
setwd('..')
setwd("CityCouncil/")
#Smoke
#bind the Smoke files
Files = dir(pattern = "Smoke")
List = lapply(Files, read_xls)
# rbind.fill (plyr) pads missing columns with NA across differing file layouts
smoke = do.call(rbind.fill, List)
#change Location column to Date
colnames(smoke)[1] = "Date"
#need to convert numeric excel date to standard date format
library(janitor)
smoke$Date = excel_numeric_to_date(as.numeric(as.character(smoke$Date)), date_system = "modern")
#merge columns that are duplicated due to header name changes
smoke$BrunswickSt <- ifelse(is.na(smoke$`BRUNSWICK ST`), smoke$`BRUNSWICK ST.`, smoke$`BRUNSWICK ST`)
smoke$`BRUNSWICK ST` = NULL
smoke$`BRUNSWICK ST.`= NULL
smoke$RDS2 <- ifelse(is.na(smoke$R.D.S), smoke$RDS, smoke$R.D.S)
smoke$R.D.S = NULL
smoke$RDS = NULL
smoke$HerbertSt <- ifelse(is.na(smoke$`HERBERT ST`), smoke$`HERBERT ST.` , smoke$`HERBERT ST`)
smoke$`HERBERT ST` = NULL
smoke$`HERBERT ST.` = NULL
smoke$OldCountyRd <- ifelse(is.na(smoke$`OLD COUNTY RD`), smoke$`OLD COUNTRY ROAD` , smoke$`OLD COUNTY RD`)
smoke$`OLD COUNTY RD` = NULL
smoke$`OLD COUNTRY ROAD` = NULL
#count nas per column
#map(smoke, ~sum(is.na(.)))
#clean the data of headings
smoke = smoke[- grep("ugm3", smoke$OldCountyRd),]
#add a smoke suffix to all road names
colnames(smoke) <- paste(colnames(smoke), "smoke", sep = "_")
colnames(smoke)[1] = "Date"
#plot(smoke, typle = "l")
#SO2
#bind the SO2 bubbler files
Files = dir(pattern = "SO2Bubbler")
List = lapply(Files, read_xls)
SO2 = do.call(rbind.fill, List)
#change Location column to Date
colnames(SO2)[1] = "Date"
#need to convert numeric excel date to standard date format
# (janitor is already attached above; this second library() call is a no-op)
library(janitor)
SO2$Date = excel_numeric_to_date(as.numeric(as.character(SO2$Date)), date_system = "modern")
#merge columns that are duplicated due to header name changes
SO2$BrunswickSt <- ifelse(is.na(SO2$`BRUNSWICK ST`), SO2$`BRUNSWICK ST.`, SO2$`BRUNSWICK ST`)
SO2$`BRUNSWICK ST` = NULL
SO2$`BRUNSWICK ST.`= NULL
SO2$RDS2 <- ifelse(is.na(SO2$R.D.S), SO2$RDS, SO2$R.D.S)
SO2$R.D.S = NULL
SO2$RDS = NULL
SO2$HerbertSt <- ifelse(is.na(SO2$`HERBERT ST`), SO2$`HERBERT ST.` , SO2$`HERBERT ST`)
SO2$`HERBERT ST` = NULL
SO2$`HERBERT ST.` = NULL
SO2$OldCountyRd <- ifelse(is.na(SO2$`OLD COUNTY RD`), SO2$`OLD COUNTRY ROAD` , SO2$`OLD COUNTY RD`)
SO2$`OLD COUNTY RD` = NULL
SO2$`OLD COUNTRY ROAD` = NULL
#sort columns alphabetically to make sure there are no duplicate street names
# NOTE(review): this result is not assigned, so the reorder is a no-op (it
# only prints when sourced). Do NOT simply assign it: the line below that
# sets colnames(SO2)[1] = "Date" assumes Date is still the first column,
# which an alphabetical sort would break. Kept as-is; intended as a visual
# duplicate check only.
SO2[ , order(names(SO2))]
#add SO2 to all road names
colnames(SO2) <- paste(colnames(SO2), "SO2", sep = "_")
colnames(SO2)[1] = "Date"
#clean the data of headings
SO2 = SO2[- grep("ugm3", SO2$OldCountyRd),]
#merge datasets
Dublin_CityCouncil_Old_Smoke_SO2_daily = merge(smoke, SO2)
#save the data
write.csv(Dublin_CityCouncil_Old_Smoke_SO2_daily, file = "../Gathered_Data/Dublin_CityCouncil_Old_Smoke_SO2_daily.csv")
#CLONSKEAGH HOURLY --------------------------------------------------------------------------------
# Reads hourly Clonskeagh ozone CSVs, converts ppb files to ug/m3
# (ppb * 48 / 22.41), binds them with the ug/m3 files, writes an hourly CSV
# and a daily mean/min/max summary CSV.
setwd('..')
setwd("Clonskeagh/")
#find ppb files
O3_ppb_files = dir(pattern = "O3.*ppb|ppb.*O3")
O3_ppb_list = lapply(O3_ppb_files, read_csv)
O3_ppb = do.call(rbind, O3_ppb_list)
#clear rows with ppb written in them
O3_ppb = O3_ppb[- grep("ppb", O3_ppb$ozone),]
#convert all the O3 columns from strings to numerical values for calculations
# assumes column 3 is the ozone value column
O3_ppb[, 3] <- sapply(O3_ppb[,3], as.numeric)
is.numeric(O3_ppb$ozone)
#convert O3 from ppb to ugm3. molecular weight is 48. formula is ppb x moleucular weight/22.41
O3_ppb$ozone =O3_ppb$ozone * (48/22.41)
#convert mgm3 to ugm3 for 2008 file
#Dublin_Clonskeagh_O3_2008 <- read_csv("Dublin_Clonskeagh_O3_2008.csv")
#Dublin_Clonskeagh_O3_2008$ozone = as.numeric(Dublin_Clonskeagh_O3_2008$ozone)/1000
#convert xls files to csv files and delete the xls files
#steps already completed
# xls <- dir(pattern = "(.*)xls$")
# created <- mapply(convert, xls, gsub("xls", "csv", xls))
# unlink(xls)
#
# #convert xlsx files to csv files and delete the xlsx files
# xlsx <- dir(pattern = "(.*)xlsx$")
# created <- mapply(convert, xlsx, gsub("xlsx", "csv", xlsx))
# unlink(xlsx)
#search for and bind the ugm3 ozone files
O3_files = dir(pattern = "O3.*ugm3|ugm3.*O3")
O3_list = lapply(O3_files, read_csv)
O3 = do.call(rbind, O3_list)
#combine converted ppb files
O3 = rbind(O3,O3_ppb)
#combine date and hour columns
O3$Date <- with(O3, as.POSIXct(paste(O3$Date, O3$Time), format="%Y-%m-%d %H"))
O3$Time = NULL
#remove any rows with ug/m3 in them
# NOTE(review): each grep-removal empties the whole frame if its pattern
# matches nothing — these assume every unit marker appears at least once.
O3 = O3[- grep("ug/m3", O3$ozone),]
O3 = O3[- grep("ugm-3", O3$ozone),]
O3 = O3[- grep("mg/m3", O3$ozone),]
#see if it looks ok
#plot(O3)
#save the data
write_csv(O3, "../Gathered_Data/Dublin_Clonskeagh_ozone_hr.csv")
#calculate the mean, max and min of ozone
O3$ozone = as.numeric(O3$ozone)
O3$Date = as.Date(O3$Date, format = "%Y-%m-%d")
mean = aggregate(O3[names(O3)!='Date'], list(hour=cut(O3$Date,'day')), mean, na.rm=F)
names(mean) <- gsub("ozone", "ozone_Mean", names(mean))
min = aggregate(O3[names(O3)!='Date'], list(hour=cut(O3$Date,'day')), min, na.rm=F)
names(min) <- gsub("ozone", "ozone_Min", names(min))
max = aggregate(O3[names(O3)!='Date'], list(hour=cut(O3$Date,'day')), max, na.rm=F)
names(max) <- gsub("ozone", "ozone_Max", names(max))
#remove hours from data
# min$hour = as.Date(min$hour,format='%Y-%m-%d %H')
# max$hour = as.Date(max$hour,format='%Y-%m-%d %H')
# mean$hour = as.Date(mean$hour,format='%Y-%m-%d %H')
#merge the data
Clonskeagh_ozone_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(mean,min,max))
#change column name to date
colnames(Clonskeagh_ozone_MMM_daily)[1] = "Date"
#remove hours from data
Clonskeagh_ozone_MMM_daily$Date = as.Date(Clonskeagh_ozone_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted hourly data
write_csv(Clonskeagh_ozone_MMM_daily, "../Gathered_Data/Dublin_Clonskeagh_ozone_MMM_daily.csv")
#clean the enviroment
rm(list=ls())
#CLONTARF --------------------------------------------------------------------------------
# Move from the previous station's directory into Clontarf/.
setwd('..')
setwd("Clontarf/")
#import the data using whitespace to separate the columns
Dublin_Clontarf_PM10_ugm3_daily <- read_table2("Dublin_Clontarf_PM10_1996.txt")
#not much data present so it is unusable -- nothing is written out for this station
#clean the environment
rm(list=ls())
#COLERAINE STREET HOURLY--------------------------------------------------------------------------------
# Hourly CO, NOx and SO2 for Coleraine Street: read the unit-specific CSV
# files, convert ppm/ppb series to mg/m3 or ug/m3, merge the pollutants,
# save the hourly series, then derive daily min/mean/max summaries.
setwd('..')
setwd("ColeraineStreet/hourly")
#convert xls files to csv files and delete the xls files (one-off, already run)
# xls <- dir(pattern = "(.*)xls$")
# created <- mapply(convert, xls, gsub("xls", "csv", xls))
# unlink(xls)
#
# #convert xlsx files to csv files and delete the xlsx files
# xlsx <- dir(pattern = "(.*)xlsx$")
# created <- mapply(convert, xlsx, gsub("xlsx", "csv", xlsx))
# unlink(xlsx)
#
# #convert txt files to csv files and delete the txt files
# txt <- dir(pattern = "(.*)txt$")
# created <- mapply(convert, txt, gsub("txt", "csv", txt))
# unlink(txt)
#CO
#search for and bind the CO files (mg/m3 units)
CO_files = dir(pattern = "CO.*mgm3|mgm3.*CO")
CO_list = lapply(CO_files, read_csv)
CO = do.call(rbind, CO_list)
#drop stray header rows.
# NOTE(review): -grep() deletes all rows if there is no match; !grepl() is safer.
CO = CO[- grep("mg/m3", CO$CO ),]
#import the ppm files and convert to mg/m3
CO_ppm_files = dir(pattern = "CO.*ppm|ppm.*CO")
CO_ppm_list = lapply(CO_ppm_files, read_csv)
CO_ppm = do.call(rbind, CO_ppm_list)
#drop stray header rows
CO_ppm = CO_ppm[- grep("ppm", CO_ppm$CO ),]
#convert ppm to mg/m3. CO molecular weight is 28; convert from a string to numeric first
CO_ppm$CO = as.numeric(CO_ppm$CO)
CO_ppm$CO = CO_ppm$CO * (28/22.41)
#bind the 2 CO files
CO = rbind(CO,CO_ppm)
#NOx
#NOx ppb files conversion
ppb_NOx = dir(pattern = "NOx.*ppb|ppb.*NOx")
NOx_ppb_list = lapply(ppb_NOx, read_csv)
NOx_ppb = do.call(rbind, NOx_ppb_list)
#clear rows with ppb written in them
NOx_ppb = NOx_ppb[- grep("ppb", NOx_ppb$NO2),]
#convert the NO/NO2/NOx columns (3:5) from strings to numeric values for calculations
NOx_ppb[, 3:5] <- sapply(NOx_ppb[, 3:5], as.numeric)
#is.numeric(NOx_ppb$NO2)
#convert NO from ppb to ug/m3. Molecular weight is 30; formula is ppb x molecular weight / 22.41
NOx_ppb$NO = NOx_ppb$NO * (30/22.41)
#convert NO2 from ppb to ug/m3 (molecular weight 46)
NOx_ppb$NO2 = NOx_ppb$NO2 * (46/22.41)
#convert NOx from ppb to ug/m3 (expressed as NO2 equivalent, molecular weight 46)
NOx_ppb$NOx = NOx_ppb$NOx * (46/22.41)
#NOx ugm3 files
NOx_files = dir(pattern = "*NOx.*ugm3|ugm3.*NOx")
NOx_list = lapply(NOx_files, read_csv)
NOx = do.call(rbind, NOx_list)
#clean the data of stray header rows
NOx = NOx[- grep("ug/m3", NOx$NOx),]
#bind converted ppb files and ugm3 files
NOx = rbind(NOx, NOx_ppb)
#plot(NOx)
#SO2
#some file preparation was required; needed to run this code only once, commented out for testing purposes
# Dublin_ColeraineSt_SO2_2006_ugm3_hr <- read_csv("Dublin_ColeraineSt_SO2_2006_ugm3_hr.csv")
# Dublin_ColeraineSt_SO2_2006_ugm3_hr = as.data.frame(subset(Dublin_ColeraineSt_SO2_2006_ugm3_hr, select=-c(V4,V5,V6)))
# colnames(Dublin_ColeraineSt_SO2_2006_ugm3_hr)[colnames(Dublin_ColeraineSt_SO2_2006_ugm3_hr) == 'SO2(ug/m3)'] <- 'SO2'
# write.csv(Dublin_ColeraineSt_SO2_2006_ugm3_hr, file = "Dublin_ColeraineSt_SO2_2006_ugm3_hr.csv", row.names=FALSE)
#search for ugm3 SO2 files using a logical OR pattern
SO2_files = dir(pattern = "SO2.*ugm3|ugm3.*SO2")
SO2_list = lapply(SO2_files, read_csv)
#rename columns for binding: change columns labelled Hour to the newer Time format used by the EPA
SO2_list <- lapply(SO2_list, function(x) setNames(x, gsub("^Hour$", "Time", names(x))) )
#bind the SO2 ugm3 data
SO2 = do.call(rbind, SO2_list)
#search for ppb SO2 files using a logical OR pattern
SO2_ppb_files = dir(pattern = "SO2.*ppb|ppb.*SO2")
SO2_ppb_list = lapply(SO2_ppb_files, read_csv)
SO2_ppb = do.call(rbind, SO2_ppb_list)
#bind SO2 data.
# NOTE(review): unlike the Knocklyon section, the ppb SO2 rows are bound here
# WITHOUT a ppb -> ug/m3 conversion (x 64/22.41). Verify the ppb-named files
# really contain ug/m3 values; otherwise mixed units end up in one column.
SO2 = rbind(SO2, SO2_ppb)
#clean possible header rows
SO2 = SO2[- grep("ug/m3", SO2$SO2),]
SO2 = SO2[- grep("ppb", SO2$SO2),]
#merge the SO2, NOx and CO data files on their shared Date/Time columns
ColeraineSt_CO_NOx_SO2_hr = merge(CO,NOx)
ColeraineSt_CO_NOx_SO2_hr = merge(ColeraineSt_CO_NOx_SO2_hr,SO2)
#combine date and hour into one POSIXct column and write to the gathered-data directory
ColeraineSt_CO_NOx_SO2_hr$Date <- with(ColeraineSt_CO_NOx_SO2_hr, as.POSIXct(paste(ColeraineSt_CO_NOx_SO2_hr$Date, ColeraineSt_CO_NOx_SO2_hr$Time), format="%Y-%m-%d %H"))
ColeraineSt_CO_NOx_SO2_hr$Time = NULL
write_csv(ColeraineSt_CO_NOx_SO2_hr, "../../Gathered_Data/Dublin_ColeraineSt_CO_NOx_SO2_hr.csv")
#calculate daily min, max and mean from the hourly data
#CO
#combine date and hour columns on a copy (atomic vector error workaround)
CO_ave = CO
CO_ave$Date <- with(CO_ave, as.POSIXct(paste(CO_ave$Date, CO_ave$Time), format="%Y-%m-%d %H"))
CO_ave$Time = NULL
CO = CO_ave
CO$CO = as.numeric(CO$CO)
# na.rm=F: a day containing any missing hour aggregates to NA
mean = aggregate(CO[names(CO)!='Date'], list(hour=cut(CO$Date,'day')), mean, na.rm=F)
colnames(mean)[2] = "CO_Mean"
min = aggregate(CO[names(CO)!='Date'], list(hour=cut(CO$Date,'day')), min, na.rm=F)
colnames(min)[2] = "CO_Min"
max = aggregate(CO[names(CO)!='Date'], list(hour=cut(CO$Date,'day')), max, na.rm=F)
colnames(max)[2] = "CO_Max"
ColeraineStreet_CO_NOx_SO2_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(mean,min,max))
#NOx
#combine date and hour columns on a copy (atomic vector error workaround)
NOx_ave = NOx
NOx_ave$Date <- with(NOx_ave, as.POSIXct(paste(NOx_ave$Date, NOx_ave$Time), format="%Y-%m-%d %H"))
NOx_ave$Time = NULL
NOx = NOx_ave
#change from string to numeric values
NOx$NOx = as.numeric(NOx$NOx)
NOx$NO = as.numeric(NOx$NO)
NOx$NO2 = as.numeric(NOx$NO2)
# rename order matters: "NOx" first (so "NO$" no longer matches it), then "NO$", then "NO2"
mean = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), mean, na.rm=F)
names(mean) <- gsub("NOx", "NOx_Mean", names(mean))
names(mean) <- gsub("NO$", "NO_Mean", names(mean))
names(mean) <- gsub("NO2", "NO2_Mean", names(mean))
min = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), min, na.rm=F)
names(min) <- gsub("NOx", "NOx_Min", names(min))
names(min) <- gsub("NO$", "NO_Min", names(min))
names(min) <- gsub("NO2", "NO2_Min", names(min))
max = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), max, na.rm=F)
names(max) <- gsub("NOx", "NOx_Max", names(max))
names(max) <- gsub("NO$", "NO_Max", names(max))
names(max) <- gsub("NO2", "NO2_Max", names(max))
#merge the NOx summaries with the CO summaries
ColeraineStreet_CO_NOx_SO2_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(ColeraineStreet_CO_NOx_SO2_hr_MMM_daily,mean,min,max))
#SO2
#combine date and hour columns on a copy (atomic vector error workaround)
SO2_ave = SO2
SO2_ave$Date <- with(SO2_ave, as.POSIXct(paste(SO2_ave$Date, SO2_ave$Time), format="%Y-%m-%d %H"))
SO2_ave$Time = NULL
SO2 = SO2_ave
SO2$SO2 = as.numeric(SO2$SO2)
mean = aggregate(SO2[names(SO2)!='Date'], list(hour=cut(SO2$Date,'day')), mean, na.rm=F)
colnames(mean)[2] = "SO2_Mean"
min = aggregate(SO2[names(SO2)!='Date'], list(hour=cut(SO2$Date,'day')), min, na.rm=F)
colnames(min)[2] = "SO2_Min"
max = aggregate(SO2[names(SO2)!='Date'], list(hour=cut(SO2$Date,'day')), max, na.rm=F)
colnames(max)[2] = "SO2_Max"
#merge the SO2 summaries with the existing data
ColeraineStreet_CO_NOx_SO2_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(ColeraineStreet_CO_NOx_SO2_hr_MMM_daily,mean,min,max))
#change the first column name to Date
colnames(ColeraineStreet_CO_NOx_SO2_hr_MMM_daily)[1] = "Date"
#drop the hour component from the date
ColeraineStreet_CO_NOx_SO2_hr_MMM_daily$Date = as.Date(ColeraineStreet_CO_NOx_SO2_hr_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted daily data
write_csv(ColeraineStreet_CO_NOx_SO2_hr_MMM_daily, "../../Gathered_Data/Dublin_ColeraineSt_CO_NOx_SO2_hr_MMM_daily.csv")
#plot(ColeraineSt_CO_NOx_SO2_hr)
#clean the environment
rm(list=ls())
#COLERAINE STREET DAILY--------------------------------------------------------------------------------
# Daily lead (Pb), PM2.5 and PM10 for Coleraine Street: bind each pollutant's
# CSV files, strip stray header rows, align the Date columns and merge.
#Lead - Pb
setwd('../daily')
#convert xls files to csv files and delete the xls files
#commented out as it has already been completed
# xls <- dir(pattern = "(.*)xls$")
# created <- mapply(convert, xls, gsub("xls", "csv", xls))
# unlink(xls)
#
# #convert xlsx files to csv files and delete the xlsx files
# xlsx <- dir(pattern = "(.*)xlsx$")
# created <- mapply(convert, xlsx, gsub("xlsx", "csv", xlsx))
# unlink(xlsx)
#
# #convert txt files to csv files and delete the txt files
# txt <- dir(pattern = "(.*)txt$")
# created <- mapply(convert, txt, gsub("txt", "csv", txt))
# unlink(txt)
#Pb
#bind the Pb files
Pb_files = dir(pattern = "*Pb")
Pb_list = lapply(Pb_files, read_csv)
Pb = do.call(rbind, Pb_list)
#drop stray header rows.
# NOTE(review): -grep() deletes all rows if there is no match; !grepl() is safer.
Pb = Pb[- grep("ug/m3", Pb$Pb),]
#bind the PM2.5 files
PM25_files = dir(pattern = "*PM25")
PM25_list = lapply(PM25_files, read_csv)
PM25 = do.call(rbind, PM25_list)
#drop stray header rows
PM25 = PM25[- grep("ug/m3", PM25$PM2.5),]
#commented out for testing code, only needed to run this code once
#PM10
#some files needed tidying up, i.e. reorganising and header name changes
# Dublin_ColeraineSt_PM10_2004_ugm3 <- read_csv("Dublin_ColeraineSt_PM10_2004_ugm3.csv")
# Dublin_ColeraineSt_PM10_2004_ugm3$`PM10(ug/m3)` = NULL
# names(Dublin_ColeraineSt_PM10_2004_ugm3)[names(Dublin_ColeraineSt_PM10_2004_ugm3) == "Date"] = "PM10"
# names(Dublin_ColeraineSt_PM10_2004_ugm3)[names(Dublin_ColeraineSt_PM10_2004_ugm3) == "V1"] = "Date"
#
# #change the date format for binding
# Dublin_ColeraineSt_PM10_2004_ugm3$Date = parse_date_time(Dublin_ColeraineSt_PM10_2004_ugm3$Date, c('dmy', 'ymd'))
#
# write.csv(Dublin_ColeraineSt_PM10_2004_ugm3, file = "Dublin_ColeraineSt_PM10_2004_ugm3.csv", row.names=FALSE)
#
# #PM10_2005 file
# Dublin_ColeraineSt_PM10_2005_ugm3 <- read_csv("Dublin_ColeraineSt_PM10_2005_ugm3.csv")
# Dublin_ColeraineSt_PM10_2005_ugm3$`PM10(ug/m3)` = NULL
# names(Dublin_ColeraineSt_PM10_2005_ugm3)[names(Dublin_ColeraineSt_PM10_2005_ugm3) == "Date"] = "PM10"
# names(Dublin_ColeraineSt_PM10_2005_ugm3)[names(Dublin_ColeraineSt_PM10_2005_ugm3) == "V1"] = "Date"
# Dublin_ColeraineSt_PM10_2005_ugm3$Date = parse_date_time(Dublin_ColeraineSt_PM10_2005_ugm3$Date, c('dmy', 'ymd'))
# write.csv(Dublin_ColeraineSt_PM10_2005_ugm3, file = "Dublin_ColeraineSt_PM10_2005_ugm3.csv", row.names=FALSE)
#
# #PM10_2006 file
# Dublin_ColeraineSt_PM10_2006_ugm3 <- read_csv("Dublin_ColeraineSt_PM10_2006_ugm3.csv")
# Dublin_ColeraineSt_PM10_2006_ugm3 <- Dublin_ColeraineSt_PM10_2006_ugm3[, -c(3:4)]
# colnames(Dublin_ColeraineSt_PM10_2006_ugm3)[2] = "PM10"
# Dublin_ColeraineSt_PM10_2006_ugm3$Date = parse_date_time(Dublin_ColeraineSt_PM10_2006_ugm3$Date, c('dmy', 'ymd'))
# write.csv(Dublin_ColeraineSt_PM10_2006_ugm3, file = "Dublin_ColeraineSt_PM10_2006_ugm3.csv", row.names=FALSE)
#bind the PM10 files
PM10_files = dir(pattern = "*PM10")
PM10_list = lapply(PM10_files, read_csv)
PM10 = do.call(rbind, PM10_list)
#drop stray header rows (both "ug/m3" and "ugm3" spellings occur)
PM10 = PM10[- grep("ug/m3", PM10$PM10),]
PM10 = PM10[- grep("ugm3", PM10$PM10),]
#set all Date columns to Date class so R can merge the frames
PM10$Date = as.Date(PM10$Date)
PM25$Date = as.Date(PM25$Date)
Pb$Date = as.Date(Pb$Date)
#merge daily data: inner merge of PM10 and Pb, then full outer merge with PM2.5
Dublin_ColeraineSt_Pb_PM10_PM25_daily = merge(PM10, Pb)
Dublin_ColeraineSt_Pb_PM10_PM25_daily = merge(Dublin_ColeraineSt_Pb_PM10_PM25_daily, PM25, by = "Date", all = TRUE)
#save the cleaned dataframe
write.csv(Dublin_ColeraineSt_Pb_PM10_PM25_daily, file = "../../Gathered_Data/Dublin_ColeraineSt_Pb_PM10_PM25_daily.csv", row.names=FALSE)
#clean the environment
rm(list=ls())
#COLLEGE GREEN DAILY--------------------------------------------------------------------------------
# Daily PM10 for College Green: whitespace-separated text files plus xls
# files, normalised to a common Date format and bound into one frame.
setwd('..')
setwd('..')
setwd("CollegeGreen/")
#read in the PM10 text files
PM10_files = dir(pattern = "PM10.*txt")
PM10_list = lapply(PM10_files, read_table2)
PM10 = do.call(rbind, PM10_list)
#change the column name for binding
colnames(PM10)[2] = "PM10"
#make sure R recognises the Date column from the txt files (day-first) as dates for proper binding
PM10$Date = as.Date(PM10$Date, format="%d/%m/%Y")
#import the xls files
PM10_files = dir(pattern = "PM10.*xls")
PM10_list = lapply(PM10_files, read_xls)
PM10_xls = do.call(rbind, PM10_list)
#make sure R recognises the Date column from the xls files (year-first) as dates for proper binding
PM10_xls$Date = as.Date(PM10_xls$Date, format="%Y/%m/%d")
#bind the text files and the xls files together, then drop stray header rows.
PM10 = rbind(PM10, PM10_xls)
# BUGFIX: use !grepl() instead of -grep(); -grep() deletes EVERY row when no
# "ug/m3" row is present, because negating integer(0) selects zero rows.
PM10 = PM10[!grepl("ug/m3", PM10$PM10),]
#save the data
write.csv(PM10, file = "../Gathered_Data/Dublin_CollegeGreen_PM10_daily.csv", row.names=FALSE)
#clean the environment
rm(list=ls())
#COUNTY COUNCIL --------------------------------------------------------------------------------
# Historical County Council smoke and SO2 (bubbler) measurements: one column
# per street, with duplicated columns (renamed headers across years)
# coalesced into a single canonical column per street.
setwd('..')
setwd("CountyCouncil/")
#Smoke
#bind the smoke files; rbind.fill pads columns missing from individual files with NA
Files = dir(pattern = "Smoke")
List = lapply(Files, read_xls)
smoke = do.call(rbind.fill, List)
#change the Location column to Date
colnames(smoke)[1] = "Date"
#need to convert the numeric Excel date serial to standard date format
library(janitor)
smoke$Date = excel_numeric_to_date(as.numeric(as.character(smoke$Date)), date_system = "modern")
#sort columns alphabetically to eyeball duplicate street names.
# NOTE(review): `test` is inspection-only dead code; it is never used again.
test = smoke[ , order(names(smoke))]
#merge columns that are duplicated due to header name changes
smoke$Avonbeg <- ifelse(is.na(smoke$ABEG), smoke$AVONBEG, smoke$ABEG)
smoke$ABEG = NULL
smoke$AVONBEG= NULL
smoke$Balbriggan <- ifelse(is.na(smoke$BALBRIGGAN), smoke$BBGAN, smoke$BALBRIGGAN)
smoke$BALBRIGGAN = NULL
smoke$BBGAN = NULL
smoke$Brookfield <- ifelse(is.na(smoke$BROOKFIELD ), smoke$BROOK , smoke$BROOKFIELD)
smoke$BROOKFIELD = NULL
smoke$BROOK = NULL
smoke$MountAnville <- ifelse(is.na(smoke$`MOUNT ANVIL` ), smoke$`MOUNT ANVILLE` , smoke$`MOUNT ANVIL`)
smoke$`MOUNT ANVIL` = NULL
smoke$`MOUNT ANVILLE` = NULL
#QuarryVale appears under three spellings, coalesced in two passes
smoke$QuarryVale <- ifelse(is.na(smoke$QVALE ), smoke$QUARRYVALE , smoke$QVALE)
smoke$QVALE = NULL
smoke$QUARRYVALE = NULL
smoke$QuarryVale = ifelse(is.na(smoke$QuarryVale), smoke$QUARYVALE, smoke$QuarryVale)
smoke$QUARYVALE = NULL
#count NAs per column (interactive check only; result not stored)
map(smoke, ~sum(is.na(.)))
#drop stray unit/header rows.
# NOTE(review): -grep() deletes all rows if there is no match; !grepl() is safer.
smoke = smoke[- grep("ugm3", smoke$DUNLAOIRE),]
#suffix every street column with _smoke, then restore the Date column name
colnames(smoke) <- paste(colnames(smoke), "smoke", sep = "_")
colnames(smoke)[1] = "Date"
#plot(smoke, typle = "l")
#SO2
#bind the SO2 bubbler files
Files = dir(pattern = "SO2Bubbler")
List = lapply(Files, read_xls)
SO2 = do.call(rbind.fill, List)
#change the Location column to Date
colnames(SO2)[1] = "Date"
#need to convert the numeric Excel date serial to standard date format
library(janitor)
SO2$Date = excel_numeric_to_date(as.numeric(as.character(SO2$Date)), date_system = "modern")
#sort columns alphabetically to make sure there are no duplicate street names
#test = SO2[ , order(names(SO2))]
#merge columns that are duplicated due to header name changes
SO2$MountAnville <- ifelse(is.na(SO2$`MOUNT ANVIL` ), SO2$`MOUNT ANVILLE` , SO2$`MOUNT ANVIL`)
SO2$`MOUNT ANVIL` = NULL
SO2$`MOUNT ANVILLE` = NULL
#suffix every street column with _SO2, then restore the Date column name
colnames(SO2) <- paste(colnames(SO2), "SO2", sep = "_")
colnames(SO2)[1] = "Date"
#drop stray unit/header rows
SO2 = SO2[- grep("ugm3", SO2$DUNLAOIRE_SO2),]
#merge the smoke and SO2 datasets on Date
Dublin_CountyCouncil_Old_Smoke_SO2_daily = merge(smoke, SO2)
#save the data.
# NOTE(review): unlike the other write.csv calls in this script this one omits
# row.names=FALSE, so the output gains a leading row-number column -- confirm
# whether that is intended.
write.csv(Dublin_CountyCouncil_Old_Smoke_SO2_daily, file = "../Gathered_Data/Dublin_CountyCouncil_Old_Smoke_SO2_daily_daily.csv")
#CRUMLIN HOURLY--------------------------------------------------------------------------------
# Hourly benzene and NOx for Crumlin: read xls sources, convert NOx ppb files
# to ug/m3, merge with benzene, save hourly data, then derive daily summaries.
setwd('..')
setwd("Crumlin/")
#import benzene files
benzene_files = dir(pattern = "*Benzene")
benzene_list = lapply(benzene_files, read_xls)
Benzene = do.call(rbind, benzene_list)
Benzene_hr = Benzene
#combine date and hour columns into one POSIXct column
Benzene_hr$Date <- with(Benzene_hr, as.POSIXct(paste(Benzene_hr$Date, Benzene_hr$Hour), format="%Y-%m-%d %H"))
Benzene_hr$Hour = NULL
#search for ppb NOx files using a logical OR pattern
NOx_ppb_files = dir(pattern = "NOx.*ppb|ppb.*NOx")
#NOx ppb files conversion
NOx_ppb_list = lapply(NOx_ppb_files, read_xls)
NOx_ppb = do.call(rbind, NOx_ppb_list)
#clear rows with ppb written in them.
# NOTE(review): -grep() deletes all rows if there is no match; !grepl() is safer.
NOx_ppb = NOx_ppb[- grep("ppb", NOx_ppb$NO2),]
#convert the NO/NO2/NOx columns (3:5) from strings to numeric values for calculations
NOx_ppb[, 3:5] <- sapply(NOx_ppb[, 3:5], as.numeric)
#is.numeric(NOx_ppb$NO2)
#convert NO from ppb to ug/m3. Molecular weight is 30; formula is ppb x molecular weight / 22.41
NOx_ppb$NO = NOx_ppb$NO * (30/22.41)
#convert NO2 from ppb to ug/m3 (molecular weight 46)
NOx_ppb$NO2 = NOx_ppb$NO2 * (46/22.41)
#convert NOx from ppb to ug/m3 (expressed as NO2 equivalent, molecular weight 46)
NOx_ppb$NOx = NOx_ppb$NOx * (46/22.41)
#NOx ug/m3 files import
NOx_files = dir(pattern = "NOx.*ugm3")
NOx_list = lapply(NOx_files, read_xls)
NOx = do.call(rbind, NOx_list)
#clear stray header rows with ug/m3 written in them
NOx = NOx[- grep("ug/m3", NOx$NO2),]
#change NO2 to numeric format for binding
NOx$NO2 = as.numeric(NOx$NO2)
#row-bind data with missing columns.
# NOTE(review): bind_rows() is dplyr, not data.table -- this library() call
# looks misplaced; bind_rows must come from an earlier library(dplyr/tidyverse).
library(data.table)
NOx = bind_rows(NOx, NOx_ppb)
#work on a copy to avoid atomic vector errors
NOx_hr = NOx
#combine date and hour columns into one POSIXct column
NOx_hr$Date <- with(NOx_hr, as.POSIXct(paste(NOx_hr$Date, NOx_hr$Hour), format="%Y-%m-%d %H"))
NOx_hr$Hour = NULL
#merge the NOx and benzene datasets on Date
Crumlin_Benzene_NOx_ugm3_hr = Reduce(function(x, y) merge(x, y, all=TRUE), list(NOx_hr, Benzene_hr))
#drop stray header rows
Crumlin_Benzene_NOx_ugm3_hr = Crumlin_Benzene_NOx_ugm3_hr[- grep("ug/m3", Crumlin_Benzene_NOx_ugm3_hr$Benzene),]
#save the hourly data
write_csv(Crumlin_Benzene_NOx_ugm3_hr, "../Gathered_Data/Dublin_Crumlin_Benzene_NOx_ugm3_hr.csv")
#NOx
#change from string to numeric values
Crumlin_Benzene_NOx_ugm3_hr$NOx = as.numeric(Crumlin_Benzene_NOx_ugm3_hr$NOx)
Crumlin_Benzene_NOx_ugm3_hr$NO = as.numeric(Crumlin_Benzene_NOx_ugm3_hr$NO)
Crumlin_Benzene_NOx_ugm3_hr$NO2 = as.numeric(Crumlin_Benzene_NOx_ugm3_hr$NO2)
Crumlin_Benzene_NOx_ugm3_hr$Benzene = as.numeric(Crumlin_Benzene_NOx_ugm3_hr$Benzene)
#daily mean, min and max; rename order matters ("NOx" before "NO$" before "NO2")
mean = aggregate(Crumlin_Benzene_NOx_ugm3_hr[names(Crumlin_Benzene_NOx_ugm3_hr)!='Date'], list(hour=cut(Crumlin_Benzene_NOx_ugm3_hr$Date,'day')), mean, na.rm=F)
names(mean) <- gsub("NOx", "NOx_Mean", names(mean))
names(mean) <- gsub("NO$", "NO_Mean", names(mean))
names(mean) <- gsub("NO2", "NO2_Mean", names(mean))
names(mean) <- gsub("Benzene", "Benzene_Mean", names(mean))
min = aggregate(Crumlin_Benzene_NOx_ugm3_hr[names(Crumlin_Benzene_NOx_ugm3_hr)!='Date'], list(hour=cut(Crumlin_Benzene_NOx_ugm3_hr$Date,'day')), min, na.rm=F)
names(min) <- gsub("NOx", "NOx_Min", names(min))
names(min) <- gsub("NO$", "NO_Min", names(min))
names(min) <- gsub("NO2", "NO2_Min", names(min))
names(min) <- gsub("Benzene", "Benzene_Min", names(min))
max = aggregate(Crumlin_Benzene_NOx_ugm3_hr[names(Crumlin_Benzene_NOx_ugm3_hr)!='Date'], list(hour=cut(Crumlin_Benzene_NOx_ugm3_hr$Date,'day')), max, na.rm=F)
names(max) <- gsub("NOx", "NOx_Max", names(max))
names(max) <- gsub("NO$", "NO_Max", names(max))
names(max) <- gsub("NO2", "NO2_Max", names(max))
names(max) <- gsub("Benzene", "Benzene_Max", names(max))
#merge the mean/min/max frames
Crumlin_Benzene_NOx_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(mean,min,max))
#change the first column name to Date
colnames(Crumlin_Benzene_NOx_hr_MMM_daily)[1] = "Date"
#drop the hour component from the date
Crumlin_Benzene_NOx_hr_MMM_daily$Date = as.Date(Crumlin_Benzene_NOx_hr_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted daily data
write_csv(Crumlin_Benzene_NOx_hr_MMM_daily, "../Gathered_Data/Dublin_Crumlin_Benzene_NOx_hr_MMM_daily.csv")
#clean the environment
rm(list=ls())
#DAVITT ROAD DAILY --------------------------------------------------------------------------------
# Daily PM10 for Davitt Road: bind the xlsx files, strip stray header rows,
# normalise mixed date formats, and save.
setwd('..')
setwd("DavittRd/")
#bind the PM10 files
PM10_files = dir(pattern = "*PM10")
PM10_list = lapply(PM10_files, read_xlsx)
PM10 = do.call(rbind, PM10_list)
#remove stray header rows.
# BUGFIX: use !grepl() instead of -grep(); -grep() deletes EVERY row when no
# "ug/m3" row is present, because negating integer(0) selects zero rows.
PM10 = PM10[!grepl("ug/m3", PM10$PM10),]
#normalise mixed day-first / year-first formats in the Date column
PM10$Date = parse_date_time(PM10$Date, c('dmy', 'ymd'))
#save the data
PM10 = PM10
write_csv(PM10,"../Gathered_Data/Dublin_DavittRd_PM10_ugm3_daily.csv")
#clean the environment
rm(list=ls())
#DUNLAOIGHAIRE HOURLY--------------------------------------------------------------------------------
# Hourly NOx for Dun Laoghaire: convert the ppb files to ug/m3, bind them
# with the native ug/m3 files, save the hourly series, then derive daily
# min/mean/max summaries.
setwd('..')
setwd("DunLaoighaire/")
# #convert xls files to csv files and delete the xls files
#already run so commenting out
# xls <- dir(pattern = "(.*)xls$")
# created <- mapply(convert, xls, gsub("xls", "csv", xls))
# unlink(xls)
#
# #convert xlsx files to csv files and delete the xlsx files
# xlsx <- dir(pattern = "(.*)xlsx$")
# created <- mapply(convert, xlsx, gsub("xlsx", "csv", xlsx))
# unlink(xlsx)
#NOx hourly data
#search for ppb NOx files using a logical OR pattern, for unit conversion
NOx_ppb_files = dir(pattern = "NOx.*ppb|ppb.*NOx")
NOx_ppb_list = lapply(NOx_ppb_files, read_csv)
NOx_ppb = do.call(rbind, NOx_ppb_list)
#drop stray header rows containing "ppb".
# BUGFIX: use !grepl() instead of -grep(); -grep() deletes EVERY row when
# there is no match, because negating integer(0) selects zero rows.
NOx_ppb = NOx_ppb[!grepl("ppb", NOx_ppb$NO2),]
#convert the NO/NO2/NOx columns (3:5) from strings to numeric values for calculations
NOx_ppb[, 3:5] <- sapply(NOx_ppb[, 3:5], as.numeric)
#convert NO from ppb to ug/m3. Molecular weight is 30; formula is ppb x molecular weight / 22.41
NOx_ppb$NO = NOx_ppb$NO * (30/22.41)
#convert NO2 from ppb to ug/m3 (molecular weight 46)
NOx_ppb$NO2 = NOx_ppb$NO2 * (46/22.41)
#convert NOx from ppb to ug/m3 (expressed as NO2 equivalent, molecular weight 46)
NOx_ppb$NOx = NOx_ppb$NOx * (46/22.41)
#search for ug/m3 NOx files using a logical OR pattern
NOx_files = dir(pattern = "NOx.*ugm3|ugm3.*NOx")
NOx_list = lapply(NOx_files, read_csv)
NOx = do.call(rbind, NOx_list)
#drop stray header rows containing "ug/m3" (same !grepl fix as above)
NOx = NOx[!grepl("ug/m3", NOx$NO2),]
#combine both types of file into one
NOx_final = rbind(NOx,NOx_ppb)
#combine the date and the time into one POSIXct column to help with graphing.
# BUGFIX: with() now evaluates in NOx_final, the frame being modified; the
# original used with(NOx, ...), which only produced the right result because
# the inner expression happened to use fully-qualified NOx_final$ names.
NOx_final$Date <- with(NOx_final, as.POSIXct(paste(NOx_final$Date, NOx_final$Time), format="%Y-%m-%d %H"))
NOx_final$Time = NULL
#save the hourly data
write_csv(NOx_final,"../Gathered_Data/Dublin_DunLaoighaire_NOx_ugm3_hr.csv")
#calculate daily min/max/mean for NOx
#change from string to numeric values
NOx = NOx_final
NOx$NOx = as.numeric(NOx$NOx)
NOx$NO = as.numeric(NOx$NO)
NOx$NO2 = as.numeric(NOx$NO2)
# na.rm=FALSE: a day containing any missing hour aggregates to NA.
# rename order matters: "NOx" first (so "NO$" no longer matches it), then "NO$", then "NO2".
mean = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), mean, na.rm=FALSE)
names(mean) <- gsub("NOx", "NOx_Mean", names(mean))
names(mean) <- gsub("NO$", "NO_Mean", names(mean))
names(mean) <- gsub("NO2", "NO2_Mean", names(mean))
min = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), min, na.rm=FALSE)
names(min) <- gsub("NOx", "NOx_Min", names(min))
names(min) <- gsub("NO$", "NO_Min", names(min))
names(min) <- gsub("NO2", "NO2_Min", names(min))
max = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), max, na.rm=FALSE)
names(max) <- gsub("NOx", "NOx_Max", names(max))
names(max) <- gsub("NO$", "NO_Max", names(max))
names(max) <- gsub("NO2", "NO2_Max", names(max))
#merge the daily mean/min/max frames
Dunlaoighaire_NOx_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(mean,min,max))
#change the first column name to Date
colnames(Dunlaoighaire_NOx_hr_MMM_daily)[1] = "Date"
#drop the hour component from the date
Dunlaoighaire_NOx_hr_MMM_daily$Date = as.Date(Dunlaoighaire_NOx_hr_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted daily data
write_csv(Dunlaoighaire_NOx_hr_MMM_daily, "../Gathered_Data/Dublin_DunLaoighaire_NOx_hr_MMM_daily.csv")
#clean the environment
rm(list=ls())
#DUNLAOIGHAIRE DAILY--------------------------------------------------------------------------------
#PM10 daily data (still inside DunLaoighaire/ from the hourly section above)
#bind the PM10 files
PM10_files = dir(pattern = "*PM10")
PM10_list = lapply(PM10_files, read_csv)
PM10 = do.call(rbind, PM10_list)
#remove stray header rows.
# BUGFIX: use !grepl() instead of -grep(); -grep() deletes EVERY row when no
# "ug/m3" row is present, because negating integer(0) selects zero rows.
PM10 = PM10[!grepl("ug/m3", PM10$PM10),]
#drop the hour component from the date
PM10$Date = as.Date(PM10$Date,format='%Y-%m-%d %H')
#save the data
write_csv(PM10, "../Gathered_Data/Dublin_DunLaoighaire_PM10_ugm3_daily.csv")
#clean the environment
rm(list=ls())
#FINGLAS DAILY --------------------------------------------------------------------------------
# Daily PM2.5 for Finglas: bind the xlsx files, strip stray header rows, save.
setwd('..')
setwd("Finglas/")
#bind the PM25 files
PM25_files = dir(pattern = "PM25")
PM25_list = lapply(PM25_files, read_xlsx)
PM25 = do.call(rbind, PM25_list)
#remove stray header rows.
# BUGFIX: use !grepl() instead of -grep(); -grep() deletes EVERY row when no
# "ug/m3" row is present, because negating integer(0) selects zero rows.
PM25 = PM25[!grepl("ug/m3", PM25$PM2.5),]
#save the data
write_csv(PM25, "../Gathered_Data/Dublin_Finglas_PM25_ugm3_daily.csv")
#clean the environment
rm(list=ls())
#KILBARRACK --------------------------------------------------------------------------------
# Daily lead (Pb) for Kilbarrack: bind the xls files, strip stray header rows, save.
setwd('..')
setwd("Kilbarrack/")
#bind the Pb files
Pb_files = dir(pattern = "Pb")
Pb_list = lapply(Pb_files, read_xls)
Pb = do.call(rbind, Pb_list)
#remove stray header rows.
# BUGFIX: use !grepl() instead of -grep(); -grep() deletes EVERY row when no
# "ug/m3" row is present, because negating integer(0) selects zero rows.
Pb = Pb[!grepl("ug/m3", Pb$Pb),]
#save the data
write_csv(Pb, "../Gathered_Data/Dublin_Kilbarrack_Pb_ugm3_daily.csv")
#clean the environment
rm(list=ls())
#KNOCKLYON DAILY--------------------------------------------------------------------------------
# Daily As, Cd, Ni, Pb and PM10 for Knocklyon: each xls file carries a stray
# unit/header row that must be dropped before merging on Date.
setwd('..')
setwd("Knocklyon/")
#daily data
# BUGFIX (all five cleanups below): use !grepl() instead of -grep();
# -grep() deletes EVERY row when there is no match, because negating
# integer(0) selects zero rows.
#Arsenic, As: import and remove the header row
As <- read_excel("Dublin_Knocklyon_As_2008_ngm3_day.xls")
As = As[!grepl("ng/m3", As$As),]
#Cadmium, Cd: import and remove the header row
Cd <- read_excel("Dublin_Knocklyon_Cd_2008_ngm3_day.xls")
Cd = Cd[!grepl("ng/m3", Cd$Cd),]
#Nickel, Ni: import and remove the header row
Ni <- read_excel("Dublin_Knocklyon_Ni_2008_ngm3_day.xls")
Ni = Ni[!grepl("ng/m3", Ni$Ni),]
#Lead, Pb: import and remove the header row
Pb <- read_excel("Dublin_Knocklyon_Pb_2008_ugm3_day.xls")
Pb = Pb[!grepl("ug/m3", Pb$Pb),]
#PM10: import and remove the header row
PM10 <- read_excel("Dublin_Knocklyon_PM10_2008_ugm3_day.xls")
PM10 = PM10[!grepl("ug/m3", PM10$PM10),]
#merge the daily data on the shared Date column (full outer merge)
Dublin_Knocklyon_As_Cd_Ni_Pb_PM10_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(As,Cd,Ni,Pb,PM10))
#save the data
write_csv(Dublin_Knocklyon_As_Cd_Ni_Pb_PM10_daily, "../Gathered_Data/Dublin_Knocklyon_As_Cd_Ni_Pb_PM10_daily.csv")
#KNOCKLYON HOURLY--------------------------------------------------------------------------------
# Hourly CO, NOx and SO2 for Knocklyon: read xls sources, convert ppm/ppb
# series to mg/m3 or ug/m3, merge the pollutants, save hourly data, then
# derive daily min/mean/max summaries.
#CO
#search for and bind the CO files (mg/m3 units)
CO_files = dir(pattern = "CO.*mgm3|mgm3.*CO")
CO_list = lapply(CO_files, read_xls)
CO = do.call(rbind, CO_list)
#drop stray header rows.
# NOTE(review): -grep() deletes all rows if there is no match; !grepl() is safer.
CO = CO[- grep("mg/m3", CO$CO ),]
#import the ppm files and convert to mg/m3
CO_ppm_files = dir(pattern = "CO.*ppm|ppm.*CO")
CO_ppm_list = lapply(CO_ppm_files, read_xls)
CO_ppm = do.call(rbind, CO_ppm_list)
#drop stray header rows
CO_ppm = CO_ppm[- grep("ppm", CO_ppm$CO ),]
#convert ppm to mg/m3. CO molecular weight is 28; convert from a string to numeric first
CO_ppm$CO = as.numeric(CO_ppm$CO)
CO_ppm$CO = CO_ppm$CO * (28/22.41)
#bind the 2 CO files
CO = rbind(CO,CO_ppm)
#NOx data, import and convert from ppb to ug/m3
#search for ppb NOx files using a logical OR pattern, for conversion
NOx_ppb_files = dir(pattern = "NOx.*ppb|ppb.*NOx")
NOx_ppb_list = lapply(NOx_ppb_files, read_xls)
NOx_ppb = do.call(rbind, NOx_ppb_list)
#clear rows with ppb written in them
NOx_ppb = NOx_ppb[- grep("ppb", NOx_ppb$NO2),]
#convert the NO/NO2/NOx columns (3:5) from strings to numeric values for calculations
NOx_ppb[, 3:5] <- sapply(NOx_ppb[, 3:5], as.numeric)
#convert NO from ppb to ug/m3. Molecular weight is 30; formula is ppb x molecular weight / 22.41
NOx_ppb$NO = NOx_ppb$NO * (30/22.41)
#convert NO2 from ppb to ug/m3 (molecular weight 46)
NOx_ppb$NO2 = NOx_ppb$NO2 * (46/22.41)
#convert NOx from ppb to ug/m3 (expressed as NO2 equivalent, molecular weight 46)
NOx_ppb$NOx = NOx_ppb$NOx * (46/22.41)
# Knocklyon only has ppb-sourced NOx files, so the converted frame is the whole dataset
NOx = NOx_ppb
#SO2
#search for ugm3 SO2 files using a logical OR pattern
SO2_files = dir(pattern = "SO2.*ugm3|ugm3.*SO2")
SO2_list = lapply(SO2_files, read_xls)
SO2 = do.call(rbind, SO2_list)
#search for ppb SO2 files using a logical OR pattern
SO2_ppb_files = dir(pattern = "SO2.*ppb|ppb.*SO2")
SO2_ppb_list = lapply(SO2_ppb_files, read_xls)
SO2_ppb = do.call(rbind, SO2_ppb_list)
#convert SO2 ppb data to ug/m3 for consistency (molecular weight 64)
SO2_ppb$SO2 = as.numeric(SO2_ppb$SO2)
SO2_ppb$SO2 = SO2_ppb$SO2 * (64/22.41)
#bind SO2 data
SO2 = rbind(SO2, SO2_ppb)
#combine CO, NOx, SO2 with a full outer merge so non-overlapping hours survive
#Dublin_Knocklyon_CO_NOx_SO2_hr = merge(CO, NOx)
#Dublin_Knocklyon_CO_NOx_SO2_hr = merge(Dublin_Knocklyon_CO_NOx_SO2_hr, SO2)
Dublin_Knocklyon_CO_NOx_SO2_hr = Reduce(function(x, y) merge(x, y, all=TRUE), list(CO, NOx, SO2))
#combine the time and the date columns into one POSIXct column
Dublin_Knocklyon_CO_NOx_SO2_hr$Date <- with(Dublin_Knocklyon_CO_NOx_SO2_hr, as.POSIXct(paste(Dublin_Knocklyon_CO_NOx_SO2_hr$Date, Dublin_Knocklyon_CO_NOx_SO2_hr$Time), format="%Y-%m-%d %H"))
Dublin_Knocklyon_CO_NOx_SO2_hr$Time = NULL
#save the hourly data
write_csv(Dublin_Knocklyon_CO_NOx_SO2_hr, "../Gathered_Data/Dublin_Knocklyon_CO_NOx_SO2_hr.csv")
#calculate daily min/max/mean for the data
#change from string to numeric values
Dublin_Knocklyon_CO_NOx_SO2_hr$NOx = as.numeric(Dublin_Knocklyon_CO_NOx_SO2_hr$NOx)
Dublin_Knocklyon_CO_NOx_SO2_hr$NO = as.numeric(Dublin_Knocklyon_CO_NOx_SO2_hr$NO)
Dublin_Knocklyon_CO_NOx_SO2_hr$NO2 = as.numeric(Dublin_Knocklyon_CO_NOx_SO2_hr$NO2)
Dublin_Knocklyon_CO_NOx_SO2_hr$CO = as.numeric(Dublin_Knocklyon_CO_NOx_SO2_hr$CO)
Dublin_Knocklyon_CO_NOx_SO2_hr$SO2 = as.numeric(Dublin_Knocklyon_CO_NOx_SO2_hr$SO2)
# rename order matters: "NOx" before "NO$" before "NO2"
mean = aggregate(Dublin_Knocklyon_CO_NOx_SO2_hr[names(Dublin_Knocklyon_CO_NOx_SO2_hr)!='Date'], list(hour=cut(Dublin_Knocklyon_CO_NOx_SO2_hr$Date,'day')), mean, na.rm=F)
names(mean) <- gsub("NOx", "NOx_Mean", names(mean))
names(mean) <- gsub("NO$", "NO_Mean", names(mean))
names(mean) <- gsub("NO2", "NO2_Mean", names(mean))
names(mean) <- gsub("CO", "CO_Mean", names(mean))
names(mean) <- gsub("SO2", "SO2_Mean", names(mean))
min = aggregate(Dublin_Knocklyon_CO_NOx_SO2_hr[names(Dublin_Knocklyon_CO_NOx_SO2_hr)!='Date'], list(hour=cut(Dublin_Knocklyon_CO_NOx_SO2_hr$Date,'day')), min, na.rm=F)
names(min) <- gsub("NOx", "NOx_Min", names(min))
names(min) <- gsub("NO$", "NO_Min", names(min))
names(min) <- gsub("NO2", "NO2_Min", names(min))
names(min) <- gsub("CO", "CO_Min", names(min))
names(min) <- gsub("SO2", "SO2_Min", names(min))
max = aggregate(Dublin_Knocklyon_CO_NOx_SO2_hr[names(Dublin_Knocklyon_CO_NOx_SO2_hr)!='Date'], list(hour=cut(Dublin_Knocklyon_CO_NOx_SO2_hr$Date,'day')), max, na.rm=F)
names(max) <- gsub("NOx", "NOx_Max", names(max))
names(max) <- gsub("NO$", "NO_Max", names(max))
names(max) <- gsub("NO2", "NO2_Max", names(max))
names(max) <- gsub("CO", "CO_Max", names(max))
names(max) <- gsub("SO2", "SO2_Max", names(max))
#merge the daily mean/min/max frames
Dublin_Knocklyon_CO_NOx_SO2_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(mean,min,max))
#change the first column name to Date
colnames(Dublin_Knocklyon_CO_NOx_SO2_hr_MMM_daily)[1] = "Date"
#drop the hour component from the date
Dublin_Knocklyon_CO_NOx_SO2_hr_MMM_daily$Date = as.Date(Dublin_Knocklyon_CO_NOx_SO2_hr_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted daily data
write_csv(Dublin_Knocklyon_CO_NOx_SO2_hr_MMM_daily, "../Gathered_Data/Dublin_Knocklyon_CO_NOx_SO2_hr_MMM_daily.csv")
#clean the environment
rm(list=ls())
#MARINO HOURLY DATA --------------------------------------------------------------------------------
# Hourly benzene, CO, NOx and SO2 for Marino (2001 only): whitespace-separated
# text files with a trailing junk column (X4/X6) skipped on read.
setwd('..')
setwd("Marino/")
#2001 data
#benzene
Benzene <- read_table2("Dublin_Marino_Benzene_TR_2001.txt", col_types = cols(X4 = col_skip()))
#CO
CO <- read_table2("Dublin_Marino_CO_TR_2001.txt", col_types = cols(X4 = col_skip()))
#NOx
NOx <- read_table2("Dublin_Marino_NOx_TR_2001.txt", col_types = cols(X6 = col_skip()))
#SO2
SO2 <- read_table2("Dublin_Marino_SO2_TR_2001.txt", col_types = cols(X4 = col_skip()))
#merge the 2001 data with a full outer merge so non-overlapping hours survive
Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr = Reduce(function(x, y) merge(x, y, all=TRUE), list(Benzene, CO, NOx, SO2))
#parse the day-first date strings into a date class R will recognise
Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr$Date <-parse_date_time(Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr$Date, orders = c("dmy"))
#combine the date and the time columns into one POSIXct column
Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr$Date <- with(Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr, as.POSIXct(paste(Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr$Date, Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr$Time), format="%Y-%m-%d %H"))
Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr$Time = NULL
#export the data; no data overlaps so separate files are kept for this area
write_csv(Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr, "../Gathered_Data/Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr.csv")
#check for rows with complete data (interactive inspection only)
#Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr[complete.cases(Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr), ]
#too little data, not worth computing min/mean/max summaries
#MARINO DAILY DATA --------------------------------------------------------------------------------
#PM2.5
#bind the PM2.5 files
PM25_files = dir(pattern = "*PM25")
PM25_list = lapply(PM25_files, read_xlsx)
PM25 = do.call(rbind, PM25_list)
#clean the data of headers
PM25 = PM25[- grep("ug/m3", PM25$PM2.5),]
#PM10 data
#import txt data first
PM10_files = dir(pattern = "*PM10(.*)txt$")
PM10_list = lapply(PM10_files, read_table2)
PM10 = do.call(rbind, PM10_list)
#change the column name for binding
colnames(PM10)[2] = "PM10"
#sort the dates out for merging with lubridate, the dates have different formats
mdy = mdy(PM10$Date)
dmy = dmy(PM10$Date)
mdy[is.na(mdy)] = dmy[is.na(mdy)]
PM10$Date = mdy
#import xls files
PM10_files = dir(pattern = "*PM10(.*)xls$")
PM10_list = lapply(PM10_files, read_xls)
PM10_xls = do.call(rbind, PM10_list)
#bind the two data sets
PM10 = rbind(PM10,PM10_xls)
#remove hours from data
PM25$Date = as.Date(PM25$Date,format='%Y-%m-%d %H')
#export the data, no data overlaps so different files for this area
write_csv(PM10, "../Gathered_Data/Dublin_Marino_PM10_daily.csv")
write_csv(PM25, "../Gathered_Data/Dublin_Marino_PM25_daily.csv")
#clean the enviroment
rm(list=ls())
#PHOENIX PARK DAILY --------------------------------------------------------------------------------
# Gather daily PM10 observations for the Phoenix Park monitor from mixed
# txt/xls/xlsx exports, harmonise the date formats, drop embedded spreadsheet
# header rows and write a single combined CSV.
setwd('..')
setwd("PhoenixPark/")
#import txt data first
PM10_files = dir(pattern = "*PM10(.*)txt$")
PM10_list = lapply(PM10_files, read_table2)
PM10 = do.call(rbind, PM10_list)
#change the column name for binding
colnames(PM10)[2] = "PM10"
#the txt files mix m/d/y and d/m/y formats: parse both ways and keep whichever
#parse succeeded per row
mdy = mdy(PM10$Date)
dmy = dmy(PM10$Date)
mdy[is.na(mdy)] = dmy[is.na(mdy)]
PM10$Date = mdy
#import xls data ("xls$" is end-anchored, so .xlsx files are not picked up)
PM10_files = dir(pattern = "*PM10(.*)xls$")
PM10_list = lapply(PM10_files, read_xls)
PM10_xls = do.call(rbind, PM10_list)
#import xlsx data
PM10_files = dir(pattern = "*PM10(.*)xlsx$")
PM10_list = lapply(PM10_files, read_xlsx)
PM10_xlsx = do.call(rbind, PM10_list)
#bind the data
PM10 = rbind(PM10, PM10_xls, PM10_xlsx)
#sort the data by date (header rows parse to NA and sort to the end)
PM10 = PM10[order(as.Date(PM10$Date, format="%Y/%m/%d")),]
#clean the data of repeated spreadsheet header rows.
#BUGFIX: use !grepl() rather than -grep(): when no "ug/m3" row is present,
#grep() returns integer(0) and df[-integer(0), ] silently drops EVERY row.
PM10 = PM10[!grepl("ug/m3", PM10$PM10),]
#save the gathered data
write_csv(PM10, "../Gathered_Data/Dublin_PhoenixPark_PM10_daily.csv")
#clean the environment before the next station's section
rm(list=ls())
#RATHMINES MONTHLY --------------------------------------------------------------------------------
# Build a monthly As/BaP/Cd/Ni/Pb table for the Rathmines monitor. Each metal
# arrives as one-value-per-month spreadsheets; after binding, a synthetic Date
# column is generated because the exports only carry month labels.
#MONTHLY DATA
setwd('..')
setwd("Rathmines/")
#Arsenic, As
#import xlsx data
As_files = dir(pattern = "As")
As_list = lapply(As_files, read_xlsx)
As = do.call(rbind, As_list)
#clean repeated header rows.
#BUGFIX: use !grepl() rather than -grep(): grep() returns integer(0) when the
#pattern is absent, and df[-integer(0), ] silently drops EVERY row.
As = As[!grepl("ng/m3", As$As),]
#create a date column (series starts January 2009, one row per month)
As$Date = seq(as.Date("2009/1/1"), by = "month", length.out = nrow(As))
#remove the old month labels and put the Date column first
As$X__1 = NULL
As = As[,c(2,1)]
#Benzo(a)pyrene, BaP (series starts January 2010)
BaP_files = dir(pattern = "BaP")
BaP_list = lapply(BaP_files, read_xlsx)
BaP = do.call(rbind, BaP_list)
#clean repeated header rows
BaP = BaP[!grepl("ng/m3", BaP$`B(a)P`),]
#create a date column
BaP$Date = seq(as.Date("2010/1/1"), by = "month", length.out = nrow(BaP))
#remove the old month labels and put the Date column first
BaP$X__1 = NULL
BaP = BaP[,c(2,1)]
#Cadmium, Cd
Cd_files = dir(pattern = "Cd")
Cd_list = lapply(Cd_files, read_xlsx)
Cd = do.call(rbind, Cd_list)
#clean repeated header rows
Cd = Cd[!grepl("ng/m3", Cd$Cd),]
#create a date column
Cd$Date = seq(as.Date("2009/1/1"), by = "month", length.out = nrow(Cd))
#remove the old month labels and put the Date column first
Cd$X__1 = NULL
Cd = Cd[,c(2,1)]
#Nickel, Ni
Ni_files = dir(pattern = "Ni")
Ni_list = lapply(Ni_files, read_xlsx)
Ni = do.call(rbind, Ni_list)
#clean repeated header rows
Ni = Ni[!grepl("ng/m3", Ni$Ni),]
#create a date column
Ni$Date = seq(as.Date("2009/1/1"), by = "month", length.out = nrow(Ni))
#remove the old month labels and put the Date column first
Ni$X__1 = NULL
Ni = Ni[,c(2,1)]
#Lead, Pb
#import xlsx files only — the monthly Pb data; daily Pb files are .xls and are
#handled in the RATHMINES DAILY section below
Pb_files = dir(pattern = "*Pb(.*)xlsx$")
Pb_list = lapply(Pb_files, read_xlsx)
Pb = do.call(rbind, Pb_list)
#clean repeated header rows
Pb = Pb[!grepl("ng/m3", Pb$Pb),]
#create a date column
Pb$Date = seq(as.Date("2009/1/1"), by = "month", length.out = nrow(Pb))
#remove the old month labels and put the Date column first
Pb$X__1 = NULL
Pb = Pb[,c(2,1)]
#merge the monthly metal series on Date
Dublin_Rathmines_As_BaP_Cd_Ni_Pb_monthly = Reduce(function(x, y) merge(x, y, all=TRUE), list(As, BaP, Cd, Ni, Pb))
#change the date format to just year/month
Dublin_Rathmines_As_BaP_Cd_Ni_Pb_monthly$Date <- format(as.Date(Dublin_Rathmines_As_BaP_Cd_Ni_Pb_monthly$Date), "%Y-%m")
#save the gathered data
write_csv(Dublin_Rathmines_As_BaP_Cd_Ni_Pb_monthly, "../Gathered_Data/Dublin_Rathmines_As_BaP_Cd_Ni_Pb_monthly.csv")
#clean the environment
rm(list=ls())
#RATHMINES DAILY------------------------------------------------------------------
# Gather daily Rathmines series: Pb, Benzene (daily files plus a daily average
# derived from an hourly file), Ethylbenzene, m/p- and o-Xylene (with ppb ->
# ug/m3 conversion), PM10 (txt/xls/xlsx plus a converted hourly 2003 file),
# PM2.5 and Toluene; then merge and export two daily CSVs.
#Pb daily files
#import xls files - daily data ("xls$" excludes the monthly .xlsx files)
Pb_files = dir(pattern = "*Pb(.*)xls$")
Pb_list = lapply(Pb_files, read_xls)
Pb = do.call(rbind, Pb_list)
#clean repeated spreadsheet header rows.
#NOTE(review): df[-grep(...), ] drops ALL rows if the pattern is absent
#(grep() returns integer(0)); !grepl() would be the safe form — applies to
#every such cleanup in this section.
Pb = Pb[- grep("ug/m3", Pb$Pb ),]
Pb$Date = as.Date(Pb$Date)
#Benzene hourly (used to fill a gap in the daily series)
Benzene_files = dir(pattern = "*Benzene.*hr")
Benzene_list = lapply(Benzene_files, read_xls)
Benzene_hr = do.call(rbind, Benzene_list)
#clean header rows and drop the hour column
Benzene_hr = Benzene_hr[- grep("ug/m3", Benzene_hr$Benzene ),]
Benzene_hr$Time = NULL
#calculate the daily average for the hourly dataset
Benzene_hr$Date = as.Date(Benzene_hr$Date)
Benzene_hr$Benzene = as.numeric(Benzene_hr$Benzene)
Benzene_daily = aggregate(cbind(Benzene_hr$Benzene) ~ Date, Benzene_hr, mean)
#change the column name for binding (aggregate named it V1)
colnames(Benzene_daily)[2] = "Benzene"
#Benzene daily files, xls
Benzene_files = dir(pattern = "*Benzene.*day(.*)xls$")
Benzene_list = lapply(Benzene_files, read_xls)
Benzene = do.call(rbind, Benzene_list)
#Benzene daily files, xlsx
Benzene_files = dir(pattern = "*Benzene.*day(.*)xlsx$")
Benzene_list = lapply(Benzene_files, read_xlsx)
Benzene_xlxs = do.call(rbind, Benzene_list)
#bind the converted hourly data and the xls and xlsx files
Benzene = rbind(Benzene_daily, Benzene, Benzene_xlxs)
#plot(Benzene)
#sort the data by date
Benzene = Benzene[order(as.Date(Benzene$Date, format="%Y/%m/%d")),]
#clean header rows
Benzene = Benzene[- grep("ug/m3", Benzene$Benzene ),]
#Ethylbenzene
Ethylbenzene_files = dir(pattern = "*ethylbenzene")
Ethylbenzene_list = lapply(Ethylbenzene_files, read_xlsx)
#rows wouldn't bind due to lowercase 'e' in the header of the 2010 file;
#changed this manually in Excel
#bind the data
Ethylbenzene = do.call(rbind, Ethylbenzene_list)
#clean header rows
Ethylbenzene = Ethylbenzene[- grep("ug/m3", Ethylbenzene$Ethylbenzene ),]
#m,p-Xylene
#import the ppb data and normalise the column names for binding.
#NOTE(review): `colnames` here is a variable that shadows base::colnames for
#the rest of the section (it is re-assigned before each later use).
Xylene_files = dir(pattern = "*MP_xylene.*ppb(.*)xlsx$")
Xylene_list = lapply(Xylene_files, read_xlsx)
colnames = c("Date", "m,p xylene")
Xylene_list <- lapply(Xylene_list, setNames, colnames)
Xylene_ppb = do.call(rbind, Xylene_list)
#convert from ppb to ug/m3 (molecular weight 106 over molar volume 22.41)
Xylene_ppb$`m,p xylene`= as.numeric(Xylene_ppb$`m,p xylene`)
Xylene_ppb$`m,p xylene` = Xylene_ppb$`m,p xylene` * (106/22.41)
#import ug/m3 files
Xylene_files = dir(pattern = "mp_xylene.*ugm3(.*)xlsx$")
Xylene_list = lapply(Xylene_files, read_xlsx)
#rename columns for binding and then bind the files
Xylene_list <- lapply(Xylene_list, setNames, colnames)
mp_Xylene = do.call(rbind, Xylene_list)
#bind the ppb and ug/m3 datasets
mp_Xylene = rbind(mp_Xylene, Xylene_ppb)
#clean header rows
mp_Xylene = mp_Xylene[- grep("ug/m3", mp_Xylene$`m,p xylene` ),]
##sort the data by date
mp_Xylene = mp_Xylene[order(as.Date(mp_Xylene$Date, format="%Y/%m/%d")),]
#o-Xylene
#ppb file import and conversion to ug/m3
oXylene_files = dir(pattern = "*o_xylene.*ppb")
oXylene_list = lapply(oXylene_files, read_xlsx)
oXylene_ppb = do.call(rbind, oXylene_list)
#convert from ppb to ug/m3; convert from strings to numeric first
oXylene_ppb$`o-xylene` = as.numeric(oXylene_ppb$`o-xylene`) * (106/22.41)
#change column name for binding
colnames(oXylene_ppb)[2] = "oXylene"
#read the ug/m3 data in
oXylene_files = dir(pattern = "*o_xylene.*ugm3")
oXylene_list = lapply(oXylene_files, read_xlsx)
#change the header names so they are all the same for binding
colnames = c("Date", "oXylene")
oXylene_list <- lapply(oXylene_list, setNames, colnames)
oXylene = do.call(rbind, oXylene_list)
#bind the ppb and ug/m3 datasets
oXylene = rbind(oXylene, oXylene_ppb)
#clean header rows
oXylene = oXylene[- grep("ug/m3", oXylene$oXylene ),]
##sort the data by date
oXylene = oXylene[order(as.Date(oXylene$Date, format="%Y/%m/%d")),]
#PM10 files
#txt
PM10_files = dir(pattern = "*PM10.*(.*)txt$")
PM10_list = lapply(PM10_files, read_table2)
PM10_txt = do.call(rbind, PM10_list)
colnames(PM10_txt)[2] = "PM10"
#the txt dates mix d/m/y and y/m/d; try both orders
PM10_txt$Date = parse_date_time(PM10_txt$Date, c('dmy', 'ymd'))
#xls
PM10_files = dir(pattern = "*PM10.*(.*)xls$")
PM10_list = lapply(PM10_files, read_xls)
PM10_xls = do.call(rbind, PM10_list)
#xlsx
PM10_files = dir(pattern = "*PM10.*(.*)xlsx$")
PM10_list = lapply(PM10_files, read_xlsx)
PM10_xlsx = do.call(rbind, PM10_list)
#bind all data together
PM10 = rbind(PM10_txt, PM10_xls, PM10_xlsx)
#clean header rows
PM10 = PM10[- grep("ug/m3", PM10$PM10 ),]
#convert the 2003 file from hourly to daily data to fill a gap in the series
PM10_2003_hr <- read_excel("Dublin_Rathmines_pm10_2003_hr.xls")
PM10_2003_hr$Hour = NULL
#clean header rows
PM10_2003_hr = PM10_2003_hr[- grep("ug/m3", PM10_2003_hr$PM10 ),]
PM10_2003_hr$Date = as.Date(PM10_2003_hr$Date)
#compute daily averages: numeric conversion first
PM10_2003_hr$PM10 = as.numeric(PM10_2003_hr$PM10)
#count NAs in case they cause issues when calculating the daily average
#(diagnostic only; the result is printed, not stored)
sum(is.na(PM10_2003_hr$Date))
#remove rows with missing dates
PM10_2003_hr = PM10_2003_hr[!is.na(PM10_2003_hr$Date), ]
#calculate the daily average
PM10_2003_daily = aggregate(cbind(PM10_2003_hr$PM10) ~ Date, PM10_2003_hr, mean)
#combine the converted hourly data with the rest of the daily data
colnames(PM10_2003_daily)[2] = "PM10"
PM10 = rbind(PM10, PM10_2003_daily)
#sort the data by date
PM10 = PM10[order(as.Date(PM10$Date, format="%Y/%m/%d")),]
#plot(PM10)
#PM2.5
#import PM25 files
PM25_files = dir(pattern = "*PM25.*(.*)xlsx$")
PM25_list = lapply(PM25_files, read_xlsx)
#rename columns for binding and then bind the files
colnames = c("Date", "PM25")
PM25_list <- lapply(PM25_list, setNames, colnames)
PM25 = do.call(rbind, PM25_list)
#Toluene
Toluene_files = dir(pattern = "*Toluene")
Toluene_list = lapply(Toluene_files, read_xlsx)
Toluene = do.call(rbind, Toluene_list)
#clean header rows
Toluene = Toluene[- grep("ug/m3", Toluene$Toluene ),]
Benzene$Date = as.Date(Benzene$Date, format = "%Y-%m-%d")
#merge the daily data together (Benzene deliberately excluded — see TODO)
Dublin_Rathmines_Benzene_Ethlybenzene_mpXylene_oXylene_Pb_Toluene_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(Ethylbenzene, mp_Xylene, oXylene, Pb, Toluene))
#Dublin_Rathmines_Benzene_Ethlybenzene_mpXylene_oXylene_Pb_Toluene_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(Benzene, Ethylbenzene, mp_Xylene, oXylene, Pb, Toluene))
#TODO
#something weird with Benzene — can't work it out, come back!
#Dublin_Rathmines_Benzene_Ethlybenzene_mpXylene_oXylene_Pb_Toluene_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(Dublin_Rathmines_Benzene_Ethlybenzene_mpXylene_oXylene_Pb_Toluene_daily, Benzene))
#save the gathered data
write_csv(Dublin_Rathmines_Benzene_Ethlybenzene_mpXylene_oXylene_Pb_Toluene_daily,"../Gathered_Data/Dublin_Rathmines_Benzene_Ethlybenzene_mpXylene_oXylene_Pb_Toluene_daily.csv")
#merge the daily data together for PM
Dublin_Rathmines_PM10_PM25_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(PM10, PM25))
#clean header rows that survived into the merged frame
Dublin_Rathmines_PM10_PM25_daily = Dublin_Rathmines_PM10_PM25_daily[- grep("ug/m3", Dublin_Rathmines_PM10_PM25_daily$PM25 ),]
#remove hours from data
Dublin_Rathmines_PM10_PM25_daily$Date = as.Date(Dublin_Rathmines_PM10_PM25_daily$Date,format='%Y-%m-%d %H')
#save the gathered data
write_csv(Dublin_Rathmines_PM10_PM25_daily, "../Gathered_Data/Dublin_Rathmines_PM10_PM25_daily.csv")
#clean the environment
rm(list=ls())
#RATHMINES HOURLY --------------------------------------------------------------------
# Gather hourly Rathmines NOx/O3/SO2 data from mixed txt/xls/xlsx exports in
# mixed units (ppb and ug/m3), convert ppb -> ug/m3, merge, export the hourly
# CSV, and then derive daily mean/min/max summaries.
#NOx
#convert the ppb files to ug/m3
#NOx text files
NOx_files = dir(pattern = "*NOx*(.*)txt")
NOx_list = lapply(NOx_files, read_table2)
NOx_txt = do.call(rbind, NOx_list)
#change the txt headers so they match the xls/xlsx files for binding
colnames(NOx_txt)[2] = "Time"
colnames(NOx_txt)[3] = "NOx"
colnames(NOx_txt)[4] = "NO"
colnames(NOx_txt)[5] = "NO2"
#the txt dates mix d/m/y and y/m/d; try both orders
NOx_txt$Date = parse_date_time(NOx_txt$Date, c('dmy', 'ymd'))
#Combine the date and the time columns into one POSIXct hourly timestamp
NOx_txt$Date <- with(NOx_txt, as.POSIXct(paste(NOx_txt$Date, NOx_txt$Time), format="%Y-%m-%d %H"))
NOx_txt$Time = NULL
#NOx xls files
NOx_files = dir(pattern = "*NOx*(.*)xls$")
NOx_list = lapply(NOx_files, read_xls)
#normalise Hour -> Time and Nox -> NOx column names before binding
NOx_list <- lapply(NOx_list, function(x) setNames(x, gsub("^Hour$", "Time", names(x))) )
NOx_list <- lapply(NOx_list, function(x) setNames(x, gsub("^Nox$", "NOx", names(x))) )
#bind the files
NOx_xls = do.call(rbind, NOx_list)
#Combine the date and the time columns
NOx_xls$Date <- with(NOx_xls, as.POSIXct(paste(NOx_xls$Date, NOx_xls$Time), format="%Y-%m-%d %H"))
NOx_xls$Time = NULL
#clean repeated header rows.
#NOTE(review): df[-grep(...), ] drops ALL rows when the pattern never matches
#(grep() returns integer(0)); !grepl() would be the safe form — applies to
#every such cleanup in this section.
NOx_xls = NOx_xls[- grep("ppb", NOx_xls$NO ),]
#import the ppb xlsx file
NOx_xlsx <- read_excel("Dublin_Rathmines_NOx_2010_ppb_hr.xlsx")
#Combine the date and the time columns
NOx_xlsx$Date <- with(NOx_xlsx, as.POSIXct(paste(NOx_xlsx$Date, NOx_xlsx$Time), format="%Y-%m-%d %H"))
NOx_xlsx$Time = NULL
#bind all the ppb data (the txt exports are treated as ppb here —
#NOTE(review): confirm the txt files really report ppb, not ug/m3)
NOx_ppb = rbind(NOx_xls, NOx_txt, NOx_xlsx)
#convert the NO/NOx/NO2 columns from strings to numeric for calculations
NOx_ppb[, 2:4] <- sapply(NOx_ppb[, 2:4], as.numeric)
#convert ppb to ug/m3 (molecular weight / 22.41). NB: NOx totals don't add up
#from NO and NO2 like in the newer datasets — possibly rounding in the source.
#NOx uses 46 (NO2-equivalent convention — TODO confirm intended).
NOx_ppb$NO = NOx_ppb$NO * (30/22.41)
NOx_ppb$NO2 = NOx_ppb$NO2 * (46/22.41)
NOx_ppb$NOx = NOx_ppb$NOx * (46/22.41)
#import NOx ug/m3 files
NOx_files = dir(pattern = "*NOx.*ugm3")
NOx_list = lapply(NOx_files, read_xlsx)
NOx_xlsx = do.call(rbind, NOx_list)
#clean repeated header rows
NOx_xlsx = NOx_xlsx[- grep("ug/m3", NOx_xlsx$NO ),]
#combine date and time columns
NOx_xlsx$Date <- with(NOx_xlsx, as.POSIXct(paste(NOx_xlsx$Date, NOx_xlsx$Time), format="%Y-%m-%d %H"))
NOx_xlsx$Time = NULL
#bind all of the NOx data
NOx = rbind(NOx_xlsx, NOx_ppb)
#plot(NOx)
#sort the data by date
NOx = NOx[order(as.Date(NOx$Date, format="%Y/%m/%d")),]
#OZONE HOURLY DATA
#import ozone txt files
O3_files = dir(pattern = "*O3(.*)txt$")
O3_list = lapply(O3_files, read_table2)
O3_txt = do.call(rbind, O3_list)
#rename the columns for binding
colnames(O3_txt)[3] = "ozone"
#the txt dates mix d/m/y and y/m/d; try both orders
O3_txt$Date = parse_date_time(O3_txt$Date, c('dmy', 'ymd'))
#import the xls O3 files
O3_files = dir(pattern = "*O3(.*)xls$")
O3_list = lapply(O3_files, read_xls)
#normalise headers that use Hour instead of Time
O3_list <- lapply(O3_list, function(x) setNames(x, gsub("^Hour$", "Time", names(x))) )
#bind the files in the list
O3_xls = do.call(rbind, O3_list)
#import the xlsx O3 ppb files
O3_files = dir(pattern = "*O3.*ppb(.*)xlsx$")
O3_list = lapply(O3_files, read_xlsx)
O3_xlsx_ppb = do.call(rbind, O3_list)
#convert ppb to ug/m3 (O3 molecular weight 48)
O3_xlsx_ppb$ozone = as.numeric(O3_xlsx_ppb$ozone) * (48/22.41)
#import xlsx ug/m3 ozone files
O3_files = dir(pattern = "*O3.*ugm3(.*)xlsx$")
O3_list = lapply(O3_files, read_xlsx)
O3_xlsx = do.call(rbind, O3_list)
#bind all the ozone files
O3 = rbind(O3_xlsx, O3_xlsx_ppb, O3_xls, O3_txt)
#sort the data by date
O3 = O3[order(as.Date(O3$Date, format="%Y/%m/%d")),]
#clean repeated header rows (both unit spellings appear in the exports)
O3 = O3[- grep("ug/m3", O3$ozone ),]
O3 = O3[- grep("mg/m3", O3$ozone ),]
#Combine the date and the time columns
O3$Date <- with(O3, as.POSIXct(paste(O3$Date, O3$Time), format="%Y-%m-%d %H"))
O3$Time = NULL
#Sulphur dioxide, SO2
#import the text files
SO2_files = dir(pattern = "*SO2(.*)txt$")
SO2_list = lapply(SO2_files, read_table2)
SO2_txt = do.call(rbind, SO2_list)
#rename the columns for binding
colnames(SO2_txt)[3] = "SO2"
#the txt dates mix d/m/y and y/m/d; try both orders
SO2_txt$Date = parse_date_time(SO2_txt$Date, c('dmy', 'ymd'))
#import the xls SO2 files
SO2_files = dir(pattern = "*SO2(.*)xls$")
SO2_list = lapply(SO2_files, read_xls)
SO2_xls = do.call(rbind, SO2_list)
#change the column name for binding
colnames(SO2_xls)[2] = "Time"
#import the xlsx SO2 ppb files
SO2_files = dir(pattern = "*SO2.*ppb(.*)xlsx$")
SO2_list = lapply(SO2_files, read_xlsx)
SO2_xlsx_ppb = do.call(rbind, SO2_list)
#convert ppb to ug/m3 (SO2 molecular weight 64)
SO2_xlsx_ppb$SO2 = as.numeric(SO2_xlsx_ppb$SO2) * (64/22.41)
#import xlsx ug/m3 SO2 files
SO2_files = dir(pattern = "*SO2.*ugm3(.*)xlsx$")
SO2_list = lapply(SO2_files, read_xlsx)
SO2_xlsx = do.call(rbind, SO2_list)
#bind all SO2 files
SO2 = rbind(SO2_txt, SO2_xls, SO2_xlsx_ppb, SO2_xlsx)
#clean repeated header rows
SO2 = SO2[- grep("ug/m3", SO2$SO2 ),]
#sort the data by date
SO2 = SO2[order(as.Date(SO2$Date, format="%Y/%m/%d")),]
#workaround for an atomic-vector error when combining date and hour in place
SO2atomicvector = SO2
#Combine the date and the time columns
SO2atomicvector$Date <- with(SO2atomicvector, as.POSIXct(paste(SO2atomicvector$Date, SO2atomicvector$Time), format="%Y-%m-%d %H"))
SO2atomicvector$Time = NULL
SO2 = SO2atomicvector
#remove NA dates to prevent thousands of NA rows being generated when merging
#(every NA-date row would cross-join with every other NA-date row)
NOx = NOx[!is.na(NOx$Date), ]
SO2 = SO2[!is.na(SO2$Date), ]
O3 = O3[!is.na(O3$Date), ]
#combine the hourly datasets for Rathmines
#merge the hourly data together on Date
Dublin_Rathmines_NOx_O3_SO2_hr = Reduce(function(x, y) merge(x, y, all=TRUE), list(NOx, O3, SO2))
#save the gathered data
write_csv(Dublin_Rathmines_NOx_O3_SO2_hr, "../Gathered_Data/Dublin_Rathmines_NOx_O3_SO2_hr.csv")
#calculate daily min/max/mean for the data
#change from string to numeric values
Dublin_Rathmines_NOx_O3_SO2_hr$NOx = as.numeric(Dublin_Rathmines_NOx_O3_SO2_hr$NOx)
Dublin_Rathmines_NOx_O3_SO2_hr$NO = as.numeric(Dublin_Rathmines_NOx_O3_SO2_hr$NO)
Dublin_Rathmines_NOx_O3_SO2_hr$NO2 = as.numeric(Dublin_Rathmines_NOx_O3_SO2_hr$NO2)
Dublin_Rathmines_NOx_O3_SO2_hr$ozone = as.numeric(Dublin_Rathmines_NOx_O3_SO2_hr$ozone)
Dublin_Rathmines_NOx_O3_SO2_hr$SO2 = as.numeric(Dublin_Rathmines_NOx_O3_SO2_hr$SO2)
#daily means per calendar day; na.rm=F means any NA hour yields an NA day.
#NOTE(review): `mean`/`min`/`max` results shadow the base functions; the FUN
#arguments still resolve to base because each assignment completes afterwards.
mean = aggregate(Dublin_Rathmines_NOx_O3_SO2_hr[names(Dublin_Rathmines_NOx_O3_SO2_hr)!='Date'], list(hour=cut(Dublin_Rathmines_NOx_O3_SO2_hr$Date,'day')), mean, na.rm=F)
names(mean) <- gsub("NOx", "NOx_Mean", names(mean))
names(mean) <- gsub("NO$", "NO_Mean", names(mean))
names(mean) <- gsub("NO2", "NO2_Mean", names(mean))
names(mean) <- gsub("ozone", "ozone_Mean", names(mean))
names(mean) <- gsub("SO2", "SO2_Mean", names(mean))
min = aggregate(Dublin_Rathmines_NOx_O3_SO2_hr[names(Dublin_Rathmines_NOx_O3_SO2_hr)!='Date'], list(hour=cut(Dublin_Rathmines_NOx_O3_SO2_hr$Date,'day')), min, na.rm=F)
names(min) <- gsub("NOx", "NOx_Min", names(min))
names(min) <- gsub("NO$", "NO_Min", names(min))
names(min) <- gsub("NO2", "NO2_Min", names(min))
names(min) <- gsub("ozone", "ozone_Min", names(min))
names(min) <- gsub("SO2", "SO2_Min", names(min))
max = aggregate(Dublin_Rathmines_NOx_O3_SO2_hr[names(Dublin_Rathmines_NOx_O3_SO2_hr)!='Date'], list(hour=cut(Dublin_Rathmines_NOx_O3_SO2_hr$Date,'day')), max, na.rm=F)
names(max) <- gsub("NOx", "NOx_Max", names(max))
names(max) <- gsub("NO$", "NO_Max", names(max))
names(max) <- gsub("NO2", "NO2_Max", names(max))
names(max) <- gsub("ozone", "ozone_Max", names(max))
names(max) <- gsub("SO2", "SO2_Max", names(max))
#merge the mean/min/max summaries on the shared day-bucket column
Dublin_Rathmines_NOx_O3_SO2_MMM_hr = Reduce(function(x, y) merge(x, y, all=TRUE), list(mean,min,max))
#change column name to Date
colnames(Dublin_Rathmines_NOx_O3_SO2_MMM_hr)[1] = "Date"
#remove hours from data
Dublin_Rathmines_NOx_O3_SO2_MMM_hr$Date = as.Date(Dublin_Rathmines_NOx_O3_SO2_MMM_hr$Date,format='%Y-%m-%d %H')
#save the converted hourly data
#NOTE(review): the output filename doubles the "MMM_hr" suffix — confirm
#against downstream consumers of this CSV before renaming.
write_csv(Dublin_Rathmines_NOx_O3_SO2_MMM_hr, "../Gathered_Data/Dublin_Rathmines_NOx_O3_SO2_MMM_hr_MMM_daily.csv")
#clean the environment
rm(list=ls())
#RINGSEND DAILY --------------------------------------------------------------------------------
# Gather daily Ringsend Benzene, PM10 and Toluene series, drop embedded
# spreadsheet header rows, merge on Date and write one combined CSV.
setwd('..')
setwd("Ringsend/")
#BENZENE
#import Benzene data
benzene_files = dir(pattern = "*Benzene")
benzene_list = lapply(benzene_files, read_xlsx)
Benzene = do.call(rbind, benzene_list)
#clean repeated header rows.
#BUGFIX: use !grepl() rather than -grep(): when no "ug/m3" row is present,
#grep() returns integer(0) and df[-integer(0), ] silently drops EVERY row.
Benzene = Benzene[!grepl("ug/m3", Benzene$Benzene),]
#PM10
#import xls files ("xls$" is end-anchored, so .xlsx files are not matched)
PM10_files = dir(pattern = "PM10.*xls$")
PM10_list = lapply(PM10_files, read_xls)
PM10_xls = do.call(rbind, PM10_list)
#import xlsx files
PM10_files = dir(pattern = "PM10.*xlsx$")
PM10_list = lapply(PM10_files, read_xlsx)
PM10_xlsx = do.call(rbind, PM10_list)
#bind the files together, then clean repeated header rows (same !grepl fix)
PM10 = rbind(PM10_xls, PM10_xlsx)
PM10 = PM10[!grepl("ug/m3", PM10$PM10),]
#Toluene
#import xlsx files
Toluene_files = dir(pattern = "Toluene.*xlsx$")
Toluene_list = lapply(Toluene_files, read_xlsx)
Toluene = do.call(rbind, Toluene_list)
#clean repeated header rows (same !grepl fix)
Toluene = Toluene[!grepl("ug/m3", Toluene$Toluene),]
#merge the daily data for Ringsend on Date
Dublin_Ringsend_Benzene_PM10_Toluene_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(Benzene, PM10, Toluene))
#remove hours from data
Dublin_Ringsend_Benzene_PM10_Toluene_daily$Date = as.Date(Dublin_Ringsend_Benzene_PM10_Toluene_daily$Date,format='%Y-%m-%d %H')
#save the gathered data
write_csv(Dublin_Ringsend_Benzene_PM10_Toluene_daily, "../Gathered_Data/Dublin_Ringsend_Benzene_PM10_Toluene_daily.csv")
#clean the environment
rm(list=ls())
#RINGSEND HOURLY --------------------------------------------------------------------------------
# Gather hourly Ringsend CO/NOx/SO2 data in mixed units (ppm/ppb and
# mg/ug per m3), convert to metric units, merge, export the hourly CSV and
# then derive daily mean/min/max summaries.
#CARBON MONOXIDE, CO
#import CO ppm data (pattern matches "ppm" on either side of "CO")
CO_files = dir(pattern = "CO.*ppm|ppm.*CO")
CO_list = lapply(CO_files, read_xlsx)
CO_ppm = do.call(rbind, CO_list)
#convert ppm to mg/m3; CO molecular weight is 28
CO_ppm$CO = as.numeric(CO_ppm$CO)
CO_ppm$CO = CO_ppm$CO * (28/22.41)
#import CO mg/m3 data.
#NOTE(review): the second alternative "ppm.*mgm3" looks like a typo for
#"mgm3.*CO" — confirm against the actual filenames.
CO_files = dir(pattern = "CO.*mgm3|ppm.*mgm3")
CO_list = lapply(CO_files, read_xlsx)
CO = do.call(rbind, CO_list)
#bind the 2 CO files
CO_final = rbind(CO,CO_ppm)
#combine the date and time columns into one POSIXct hourly timestamp
CO_final$Date <- with(CO_final, as.POSIXct(paste(CO_final$Date , CO_final$Time), format="%Y-%m-%d %H"))
CO_final$Time = NULL
#clean repeated header rows.
#NOTE(review): df[-grep(...), ] drops ALL rows when the pattern never matches
#(grep() returns integer(0)); !grepl() would be the safe form — applies to
#every such cleanup in this section.
CO_final = CO_final[- grep("mg/m3", CO_final$CO),]
#change name for consistency
CO = CO_final
#NOx
#NOx ppb files conversion
ppb_NOx = dir(pattern = "NOx.*ppb|ppb.*NOx")
NOx_list = lapply(ppb_NOx, read_xlsx)
NOx_ppb = do.call(rbind, NOx_list)
#clear rows with "ppb" written in them (embedded unit header rows)
NOx_ppb = NOx_ppb[- grep("ppb", NOx_ppb$NO2),]
#convert the NOx/NO/NO2 columns from strings to numeric for calculations
#(columns 3:5 here because the Time column is still present)
NOx_ppb[, 3:5] <- sapply(NOx_ppb[, 3:5], as.numeric)
#is.numeric(NOx_ppb$NO2)
#convert from ppb to ug/m3: molecular weight / 22.41
#(NOx uses 46, the NO2-equivalent convention — TODO confirm intended)
NOx_ppb$NOx = NOx_ppb$NOx * (46/22.41)
NOx_ppb$NO = NOx_ppb$NO * (30/22.41)
NOx_ppb$NO2 = NOx_ppb$NO2 * (46/22.41)
#import the ug/m3 NOx files
NOx_files = dir(pattern = "NOx.*ugm3|ugm3.*NOx")
NOx_list = lapply(NOx_files, read_xlsx)
NOx_ugm3 = do.call(rbind, NOx_list)
#clean repeated header rows
NOx_ugm3 = NOx_ugm3[- grep("ug/m3", NOx_ugm3$NOx),]
#bind the ppb-converted and ug/m3 files
NOx_final = rbind(NOx_ppb, NOx_ugm3)
#combine the date and time columns
NOx_final$Date <- with(NOx_final, as.POSIXct(paste(NOx_final$Date , NOx_final$Time), format="%Y-%m-%d %H"))
NOx_final$Time = NULL
#change name for consistency — aids with atomic-vector errors
NOx = NOx_final
#SO2
#search for ppb SO2 files using a logical OR pattern
SO2_ppb_files = dir(pattern = "SO2.*ppb|ppb.*SO2")
SO2_ppb_list = lapply(SO2_ppb_files, read_xlsx)
SO2_ppb = do.call(rbind, SO2_ppb_list)
#convert ppb to ug/m3 (SO2 molecular weight 64)
SO2_ppb$SO2 = as.numeric(SO2_ppb$SO2) * (64/22.41)
#search for ug/m3 SO2 files using a logical OR pattern
SO2_files = dir(pattern = "SO2.*ugm3|ugm3.*SO2")
SO2_list = lapply(SO2_files, read_xlsx)
SO2_ugm3 = do.call(rbind, SO2_list)
#bind SO2 data
SO2 = rbind(SO2_ugm3, SO2_ppb)
#clean possible header rows
SO2 = SO2[- grep("ug/m3", SO2$SO2),]
#atomic-vector error workaround: mutate a copy, then assign back
SO2_ave = SO2
#combine the date and time columns
SO2_ave$Date <- with(SO2_ave, as.POSIXct(paste(SO2_ave$Date , SO2_ave$Time), format="%Y-%m-%d %H"))
SO2_ave$Time = NULL
SO2 = SO2_ave
#merge the hourly data for Ringsend on Date
Dublin_Ringsend_CO_NOx_SO2_hr = Reduce(function(x, y) merge(x, y, all=TRUE), list(CO, NOx, SO2))
Dublin_Ringsend_CO_NOx_SO2_hr$Time = NULL
#save the gathered data
write_csv(Dublin_Ringsend_CO_NOx_SO2_hr, "../Gathered_Data/Dublin_Ringsend_CO_NOx_SO2_hr.csv")
#calculate daily min/max/mean for the data
#change from string to numeric values
Dublin_Ringsend_CO_NOx_SO2_hr$NOx = as.numeric(Dublin_Ringsend_CO_NOx_SO2_hr$NOx)
Dublin_Ringsend_CO_NOx_SO2_hr$NO = as.numeric(Dublin_Ringsend_CO_NOx_SO2_hr$NO)
Dublin_Ringsend_CO_NOx_SO2_hr$NO2 = as.numeric(Dublin_Ringsend_CO_NOx_SO2_hr$NO2)
Dublin_Ringsend_CO_NOx_SO2_hr$CO = as.numeric(Dublin_Ringsend_CO_NOx_SO2_hr$CO)
Dublin_Ringsend_CO_NOx_SO2_hr$SO2 = as.numeric(Dublin_Ringsend_CO_NOx_SO2_hr$SO2)
#daily means per calendar day; na.rm=F means any NA hour yields an NA day.
#NOTE(review): `mean`/`min`/`max` results shadow the base functions; the FUN
#arguments still resolve to base because each assignment completes afterwards.
mean = aggregate(Dublin_Ringsend_CO_NOx_SO2_hr[names(Dublin_Ringsend_CO_NOx_SO2_hr)!='Date'], list(hour=cut(Dublin_Ringsend_CO_NOx_SO2_hr$Date,'day')), mean, na.rm=F)
names(mean) <- gsub("NOx", "NOx_Mean", names(mean))
names(mean) <- gsub("NO$", "NO_Mean", names(mean))
names(mean) <- gsub("NO2", "NO2_Mean", names(mean))
names(mean) <- gsub("CO", "CO_Mean", names(mean))
names(mean) <- gsub("SO2", "SO2_Mean", names(mean))
min = aggregate(Dublin_Ringsend_CO_NOx_SO2_hr[names(Dublin_Ringsend_CO_NOx_SO2_hr)!='Date'], list(hour=cut(Dublin_Ringsend_CO_NOx_SO2_hr$Date,'day')), min, na.rm=F)
names(min) <- gsub("NOx", "NOx_Min", names(min))
names(min) <- gsub("NO$", "NO_Min", names(min))
names(min) <- gsub("NO2", "NO2_Min", names(min))
names(min) <- gsub("CO", "CO_Min", names(min))
names(min) <- gsub("SO2", "SO2_Min", names(min))
max = aggregate(Dublin_Ringsend_CO_NOx_SO2_hr[names(Dublin_Ringsend_CO_NOx_SO2_hr)!='Date'], list(hour=cut(Dublin_Ringsend_CO_NOx_SO2_hr$Date,'day')), max, na.rm=F)
names(max) <- gsub("NOx", "NOx_Max", names(max))
names(max) <- gsub("NO$", "NO_Max", names(max))
names(max) <- gsub("NO2", "NO2_Max", names(max))
names(max) <- gsub("CO", "CO_Max", names(max))
names(max) <- gsub("SO2", "SO2_Max", names(max))
#merge the mean/min/max summaries on the shared day-bucket column
Dublin_Ringsend_CO_NOx_SO2_MMM_hr = Reduce(function(x, y) merge(x, y, all=TRUE), list(mean,min,max))
#change column name to Date
colnames(Dublin_Ringsend_CO_NOx_SO2_MMM_hr)[1] = "Date"
#remove hours from data
Dublin_Ringsend_CO_NOx_SO2_MMM_hr$Date = as.Date(Dublin_Ringsend_CO_NOx_SO2_MMM_hr$Date,format='%Y-%m-%d %H')
#save the converted hourly data
write_csv(Dublin_Ringsend_CO_NOx_SO2_MMM_hr, "../Gathered_Data/Dublin_Ringsend_CO_NOx_SO2_MMM_hr_MMM_daily.csv")
#clean the environment
rm(list=ls())
#ROSEMOUNT MONTHLY--------------------------------------------------------------------------------
# Gather monthly metal-deposition series (As/Cd/Hg/Ni/Pb) for Rosemount.
# The exports only carry month labels, so synthetic Date columns are built.
# The `Dates` frame created for As (2010 + 2012, skipping 2011) is reused for
# Cd, Ni and Pb, which share the same coverage; Hg starts in 2012.
setwd('..')
setwd("Rosemount/")
#All metals 2012 (read but not used below — kept for reference? TODO confirm)
Dublin_Rosemount_AllMetalDeposition_2012_ngm3_month <- read_excel("Dublin_Rosemount_AllMetalDeposition_2012_ngm3_month.xlsx")
#As
#Arsenic: import the As data and remove embedded unit-header rows.
#NOTE(review): df[-grep(...), ] drops ALL rows if a pattern never matches
#(grep() returns integer(0)); !grepl() would be the safe form.
As_files = dir(pattern = "As")
As_list = lapply(As_files, read_xlsx)
As = do.call(rbind, As_list)
As = As[- grep("ug m-3 day-1", As$As),]
As = As[- grep("ug m-2 day-1", As$As),]
#create a date column for merging: 2010 onwards, one row per month, with 12
#extra months generated so the missing year 2011 can be filtered back out
Dates = as.data.frame(seq(as.Date("2010/1/1"), by = "month", length.out = nrow(As) + 12))
colnames(Dates)[1] = "Date"
#there is no 2011 data so remove that year from the dates
Date1 = as.Date("2011-12-01")
Date2 = as.Date("2011-01-01")
Dates = filter(Dates, Dates$Date > Date1 | Dates$Date < Date2)
#bind dates with As (positional bind — relies on row order matching)
As = cbind(As, Dates)
#remove the old month labels and put the Date column first
As$X__1 = NULL
As = As[,c(2,1)]
#Cadmium, Cd: import and clean header rows
Cd_files = dir(pattern = "Cd")
Cd_list = lapply(Cd_files, read_xlsx)
Cd = do.call(rbind, Cd_list)
Cd = Cd[- grep("ug m-3 day-1", Cd$Cd),]
Cd = Cd[- grep("ug m-2 day-1", Cd$Cd),]
#same coverage as As: reuse the date column created above
Cd = cbind(Cd, Dates)
#remove the old month labels and put the Date column first
Cd$X__1 = NULL
Cd = Cd[,c(2,1)]
#Mercury, Hg
Hg_files = dir(pattern = "Hg")
Hg_list = lapply(Hg_files, read_xlsx)
Hg = do.call(rbind, Hg_list)
#clean header rows
Hg = Hg[- grep("ug m-2 day-1", Hg$Hg),]
#Hg starts in 2012: build its own month sequence
Hg$Date = seq(as.Date("2012/1/1"), by = "month", length.out = nrow(Hg))
#remove the old month labels and put the Date column first
Hg$X__1 = NULL
Hg = Hg[,c(2,1)]
#Ni
#Nickel: import Ni data and remove header rows
Ni_files = dir(pattern = "Ni")
Ni_list = lapply(Ni_files, read_xlsx)
Ni = do.call(rbind, Ni_list)
#rows 13 and 26 removed manually — the grep search wouldn't match them
#(NOTE(review): hard-coded indices are fragile if the input files change)
Ni = Ni[- grep("ug m-3 day-1", Ni$Ni),]
Ni = Ni[-c(13, 26), ]
#same coverage as As: reuse the date column created above
Ni = cbind(Ni, Dates)
#remove the old month labels and put the Date column first
Ni$X__1 = NULL
Ni = Ni[,c(2,1)]
#Pb
#Lead: import Pb data and remove header rows
Pb_files = dir(pattern = "Pb")
Pb_list = lapply(Pb_files, read_xlsx)
Pb = do.call(rbind, Pb_list)
Pb = Pb[- grep("ug m-3 day-1", Pb$Pb),]
Pb = Pb[- grep("ug m-2 day-1", Pb$Pb),]
#same coverage as As: reuse the date column created above
Pb = cbind(Pb, Dates)
#remove the old month labels and put the Date column first
Pb$X__1 = NULL
Pb = Pb[,c(2,1)]
#merge the Rosemount monthly data together on Date
Dublin_Rosemount_As_Cd_Hg_Ni_Pb = Reduce(function(x, y) merge(x, y, all=TRUE), list(As,Cd,Hg,Ni,Pb))
#remove the day component from the date column (keep year-month)
Dublin_Rosemount_As_Cd_Hg_Ni_Pb$Date = format(Dublin_Rosemount_As_Cd_Hg_Ni_Pb$Date, format="%Y-%m")
#save the gathered data
write_csv(Dublin_Rosemount_As_Cd_Hg_Ni_Pb, "../Gathered_Data/Dublin_Rosemount_As_Cd_Hg_Ni_Pb_monthly.csv")
#clean the environment
rm(list=ls())
#ST. ANNES PARK HOURLY --------------------------------------------------------------------------------
# Combine the St. Annes Park hourly NOx exports (already in ug/m3), save the
# hourly series, then derive daily mean/min/max summaries.
setwd('..')
setwd("StAnnesPark/")
#HOURLY NOx
#import and bind the NOx files
NOx_files = dir(pattern = "NOx.*ugm3|ugm3.*NOx")
NOx_list = lapply(NOx_files, read_xlsx)
NOx_final = do.call(rbind, NOx_list)
#clean repeated spreadsheet header rows.
#BUGFIX: use !grepl() rather than -grep(): when no "ug/m3" row is present,
#grep() returns integer(0) and df[-integer(0), ] silently drops EVERY row.
NOx_final = NOx_final[!grepl("ug/m3", NOx_final$NOx),]
#combine the date and time columns into one POSIXct hourly timestamp
NOx_final$Date <- with(NOx_final, as.POSIXct(paste(NOx_final$Date, NOx_final$Time), format="%Y-%m-%d %H"))
NOx_final$Time = NULL
NOx = NOx_final
#save the gathered hourly data
write_csv(NOx, "../Gathered_Data/Dublin_StAnnesPark_NOx_hr.csv")
#Convert to daily values: min/max/mean per calendar day
#change from string to numeric values first
NOx$NOx = as.numeric(NOx$NOx)
NOx$NO = as.numeric(NOx$NO)
NOx$NO2 = as.numeric(NOx$NO2)
#na.rm=FALSE: a day with any missing hour reports NA, matching the other
#stations' summaries. Results go in nox_mean/nox_min/nox_max rather than
#mean/min/max so the base aggregation functions are not shadowed.
#The gsub order matters: "NO$" is end-anchored so it does not touch NOx/NO2.
nox_mean = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), mean, na.rm=FALSE)
names(nox_mean) <- gsub("NOx", "NOx_Mean", names(nox_mean))
names(nox_mean) <- gsub("NO$", "NO_Mean", names(nox_mean))
names(nox_mean) <- gsub("NO2", "NO2_Mean", names(nox_mean))
nox_min = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), min, na.rm=FALSE)
names(nox_min) <- gsub("NOx", "NOx_Min", names(nox_min))
names(nox_min) <- gsub("NO$", "NO_Min", names(nox_min))
names(nox_min) <- gsub("NO2", "NO2_Min", names(nox_min))
nox_max = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), max, na.rm=FALSE)
names(nox_max) <- gsub("NOx", "NOx_Max", names(nox_max))
names(nox_max) <- gsub("NO$", "NO_Max", names(nox_max))
names(nox_max) <- gsub("NO2", "NO2_Max", names(nox_max))
#merge the three daily summaries on the shared day-bucket column
Dublin_StAnnesPark_NOx_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(nox_mean,nox_min,nox_max))
#first merged column is the day bucket produced by cut(); rename it to Date
colnames(Dublin_StAnnesPark_NOx_MMM_daily)[1] = "Date"
#drop the hour component from the dates
Dublin_StAnnesPark_NOx_MMM_daily$Date = as.Date(Dublin_StAnnesPark_NOx_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted daily data
write_csv(Dublin_StAnnesPark_NOx_MMM_daily, "../Gathered_Data/Dublin_StAnnesPark_NOx_MMM_daily.csv")
#clean the environment
rm(list=ls())
#ST. ANNES PARK DAILY --------------------------------------------------------------------------------
#PM10 — working directory is still StAnnesPark/ from the hourly section above.
#import PM10 files
PM10_files = dir(pattern = "PM10.*xlsx$")
PM10_list = lapply(PM10_files, read_xlsx)
PM10 = do.call(rbind, PM10_list)
#clean the data of embedded header rows.
#BUG FIX: was PM10[-grep(...), ]; with zero matches grep() returns integer(0)
#and x[-integer(0), ] silently drops EVERY row. !grepl() is a no-op in that case.
PM10 = PM10[!grepl("ug/m3", PM10$PM10), ]
#remove hours from data (keep calendar date only)
PM10$Date = as.Date(PM10$Date,format='%Y-%m-%d %H')
#save the gathered data
write_csv(PM10, "../Gathered_Data/Dublin_StAnnesPark_PM10_daily.csv")
#clean the environment
rm(list=ls())
#SWORDS HOURLY ------------------------------------------------------------------------------------
#Import hourly NOx for the Swords station.
setwd('..')
setwd("Swords/")
#HOURLY NOx
#bind the NOx files chronologically
NOx_files = dir(pattern = "*NOx")
NOx_list = lapply(NOx_files, read_xlsx)
NOx_final = do.call(rbind, NOx_list)
#clear embedded header rows (rows with the unit string in the NO2 column)
#NOTE(review): x[-grep(...), ] drops ALL rows when grep() finds no match
NOx_final = NOx_final[- grep("ug/m3", NOx_final$NO2),]
#Combine the date and the time for the different pollutants to help with graphing
NOx_final$Date <- with(NOx_final, as.POSIXct(paste(NOx_final$Date, NOx_final$Time), format="%Y-%m-%d %H"))
NOx_final$Time = NULL
NOx = NOx_final
#Ozone — import ppb files, convert to ug/m3, bind with native ug/m3 files.
#find ppb files
O3_ppb_files = dir(pattern = "O3.*ppb|ppb.*O3")
O3_ppb_list = lapply(O3_ppb_files, read_xlsx)
O3_ppb = do.call(rbind, O3_ppb_list)
#drop embedded header rows; !grepl() keeps all rows when nothing matches,
#whereas x[-grep(...), ] would drop every row on zero matches
O3_ppb = O3_ppb[!grepl("ppb", O3_ppb$ozone), ]
#convert all the O3 rows from strings to numerical values for calculations
O3_ppb[, 3] <- sapply(O3_ppb[,3], as.numeric)
#convert O3 from ppb to ug/m3. molecular weight is 48. formula is ppb * molecular weight / 22.41
O3_ppb$ozone =O3_ppb$ozone * (48/22.41)
#import ugm3 files.
#BUG FIX: second alternative was "ugm3*O3" i.e. "ugm" + zero-or-more "3" + "O3";
#the intended pattern is "ugm3.*O3" (mirrors the ppb pattern above).
O3_ugm3_files = dir(pattern = "O3.*ugm3|ugm3.*O3")
O3_ugm3_list = lapply(O3_ugm3_files, read_xlsx)
O3_ugm3 = do.call(rbind, O3_ugm3_list)
#clear embedded header rows (safe form, see above)
O3_ugm3 = O3_ugm3[!grepl("ug/m3", O3_ugm3$ozone), ]
#bind the ppb and ugm3 data
O3_final = rbind(O3_ppb, O3_ugm3)
#Combine the date and the time for the different pollutants to help with graphing
O3_final$Date <- with(O3_final, as.POSIXct(paste(O3_final$Date, O3_final$Time), format="%Y-%m-%d %H"))
O3_final$Time = NULL
#plot(O3_final, type = "l")
O3 = O3_final
#merge the hourly NOx and O3 data for Swords on the Date column
Dublin_Swords_NOx_Ozone_hr = Reduce(function(x, y) merge(x, y, all=TRUE), list(NOx, O3))
#save the gathered data
write_csv(Dublin_Swords_NOx_Ozone_hr, "../Gathered_Data/Dublin_Swords_NOx_Ozone_hr.csv")
#Convert to daily values
#calculate min/max/mean for the data
#change from string to numeric values
Dublin_Swords_NOx_Ozone_hr$NOx = as.numeric(Dublin_Swords_NOx_Ozone_hr$NOx)
Dublin_Swords_NOx_Ozone_hr$NO = as.numeric(Dublin_Swords_NOx_Ozone_hr$NO)
Dublin_Swords_NOx_Ozone_hr$NO2 = as.numeric(Dublin_Swords_NOx_Ozone_hr$NO2)
Dublin_Swords_NOx_Ozone_hr$ozone = as.numeric(Dublin_Swords_NOx_Ozone_hr$ozone)
#na.rm=F: any missing hour makes that day's statistic NA — presumably deliberate; confirm
mean = aggregate(Dublin_Swords_NOx_Ozone_hr[names(Dublin_Swords_NOx_Ozone_hr)!='Date'], list(hour=cut(Dublin_Swords_NOx_Ozone_hr$Date,'day')), mean, na.rm=F)
names(mean) <- gsub("NOx", "NOx_Mean", names(mean))
#"NO$" is end-anchored so already-renamed NOx/NO2 columns are untouched
names(mean) <- gsub("NO$", "NO_Mean", names(mean))
names(mean) <- gsub("NO2", "NO2_Mean", names(mean))
names(mean) <- gsub("ozone", "ozone_Mean", names(mean))
min = aggregate(Dublin_Swords_NOx_Ozone_hr[names(Dublin_Swords_NOx_Ozone_hr)!='Date'], list(hour=cut(Dublin_Swords_NOx_Ozone_hr$Date,'day')), min, na.rm=F)
names(min) <- gsub("NOx", "NOx_Min", names(min))
names(min) <- gsub("NO$", "NO_Min", names(min))
names(min) <- gsub("NO2", "NO2_Min", names(min))
names(min) <- gsub("ozone", "ozone_Min", names(min))
max = aggregate(Dublin_Swords_NOx_Ozone_hr[names(Dublin_Swords_NOx_Ozone_hr)!='Date'], list(hour=cut(Dublin_Swords_NOx_Ozone_hr$Date,'day')), max, na.rm=F)
names(max) <- gsub("NOx", "NOx_Max", names(max))
names(max) <- gsub("NO$", "NO_Max", names(max))
names(max) <- gsub("NO2", "NO2_Max", names(max))
names(max) <- gsub("ozone", "ozone_Max", names(max))
#merge the daily mean/min/max frames on the day column
Dublin_Swords_NOx_Ozone_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(mean,min,max))
#change column name to date
colnames(Dublin_Swords_NOx_Ozone_hr_MMM_daily)[1] = "Date"
#remove hours from data
Dublin_Swords_NOx_Ozone_hr_MMM_daily$Date = as.Date(Dublin_Swords_NOx_Ozone_hr_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted hourly data
write_csv(Dublin_Swords_NOx_Ozone_hr_MMM_daily, "../Gathered_Data/Dublin_Swords_NOx_Ozone_hr_MMM_daily.csv")
#clean the environment
rm(list=ls())
#TALLAGHT DAILY ------------------------------------------------------------------------------------
setwd('..')
setwd("Tallaght/")
#DAILY PM10
#import PM10 xls files ("xls$" is end-anchored so it does not match xlsx)
PM10_files = dir(pattern = "PM10.*xls$")
PM10_list = lapply(PM10_files, read_xls)
PM10_xls = do.call(rbind, PM10_list)
#import xlsx files
PM10_files = dir(pattern = "PM10.*xlsx")
PM10_list = lapply(PM10_files, read_xlsx)
PM10_xlsx = do.call(rbind, PM10_list)
#bind both imports, then drop embedded header rows.
#BUG FIX: was PM10[-grep(...), ]; with zero matches grep() returns integer(0)
#and x[-integer(0), ] silently drops EVERY row. !grepl() keeps all rows then.
PM10 = rbind(PM10_xls, PM10_xlsx)
PM10 = PM10[!grepl("ug/m3", PM10$PM10), ]
#remove hours from data
PM10$Date = as.Date(PM10$Date,format='%Y-%m-%d %H')
#save the gathered data
write_csv(PM10, "../Gathered_Data/Dublin_Tallaght_PM10_daily.csv")
#TALLAGHT HOURLY ------------------------------------------------------------------------------------
#SO2 — import ppb and ug/m3 files, unify units, save hourly, then daily stats.
#search for ppb SO2 files using a logical OR pattern
SO2_ppb_files = dir(pattern = "SO2.*ppb|ppb.*SO2")
SO2_ppb_list = lapply(SO2_ppb_files, read_xlsx)
SO2_ppb = do.call(rbind, SO2_ppb_list)
#clean possible headings.
#BUG FIX: was x[-grep(...), ]; with zero matches grep() returns integer(0) and
#x[-integer(0), ] silently drops EVERY row. !grepl() is a no-op in that case.
SO2_ppb = SO2_ppb[!grepl("ppb", SO2_ppb$SO2), ]
#convert ppb to ug/m3 (SO2 molecular weight 64): ppb * MW / 22.41
SO2_ppb$SO2 = as.numeric(SO2_ppb$SO2) * (64/22.41)
#search for ugm3 SO2 files.
#BUG FIX: second alternative was "ugm3*SO2" ("ugm" + zero-or-more "3" + "SO2");
#the intended pattern is "ugm3.*SO2", mirroring the ppb pattern above.
SO2_files = dir(pattern = "SO2.*ugm3|ugm3.*SO2")
SO2_list = lapply(SO2_files, read_xlsx)
SO2_ugm3 = do.call(rbind, SO2_list)
#bind SO2 data
SO2 = rbind(SO2_ppb, SO2_ugm3)
#clean possible headings left over in the ugm3 files (safe form, see above)
SO2 = SO2[!grepl("ug/m3", SO2$SO2), ]
#save the gathered data
write_csv(SO2, "../Gathered_Data/Dublin_Tallaght_SO2_hr.csv")
#atomic error workaround
SO2_ave = SO2
#Combine the date and the time to help with graphing
SO2_ave$Date <- with(SO2_ave, as.POSIXct(paste(SO2_ave$Date, SO2_ave$Time), format="%Y-%m-%d %H"))
SO2_ave$Time = NULL
SO2 = SO2_ave
#Convert to daily values
#calculate min/max/mean for the data
#change from string to numeric values
SO2$SO2 = as.numeric(SO2$SO2)
#na.rm=FALSE (was the reassignable shorthand F): any missing hour makes that
#day's statistic NA — presumably deliberate; confirm
mean = aggregate(SO2[names(SO2)!='Date'], list(hour=cut(SO2$Date,'day')), mean, na.rm=FALSE)
names(mean) <- gsub("SO2", "SO2_Mean", names(mean))
min = aggregate(SO2[names(SO2)!='Date'], list(hour=cut(SO2$Date,'day')), min, na.rm=FALSE)
names(min) <- gsub("SO2", "SO2_Min", names(min))
max = aggregate(SO2[names(SO2)!='Date'], list(hour=cut(SO2$Date,'day')), max, na.rm=FALSE)
names(max) <- gsub("SO2", "SO2_Max", names(max))
#merge the daily mean/min/max frames on the day column
Dublin_Tallaght_SO2_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(mean,min,max))
#change column name to date
colnames(Dublin_Tallaght_SO2_hr_MMM_daily)[1] = "Date"
#remove hours from data
Dublin_Tallaght_SO2_hr_MMM_daily$Date = as.Date(Dublin_Tallaght_SO2_hr_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted hourly data
write_csv(Dublin_Tallaght_SO2_hr_MMM_daily, "../Gathered_Data/Dublin_Tallaght_SO2_hr_MMM_daily.csv")
#clean the environment
rm(list=ls())
#WINETAVERN STREET MONTHLY ------------------------------------------------------------------------------------
#Monthly metals/BaP: each pollutant gets a synthetic monthly Date column, then all are merged.
setwd('..')
setwd("WinetavernSt/")
#Monthly
#As
#Arsenic, import As data and remove header
As_files = dir(pattern = "As")
As_list = lapply(As_files, read_xlsx)
As = do.call(rbind, As_list)
#NOTE(review): x[-grep(...), ] drops ALL rows when grep() finds no match;
#safer form is x[!grepl(...), ] — applies to every grep-removal below.
As = As[- grep("ng/m3", As$As),]
#create a date column for merging data. added 12 for the extra year missing from the data
Dates = as.data.frame(seq(as.Date("2009/1/1"), by = "month", length.out = nrow(As) + 12))
colnames(Dates)[1] = "Date"
#there is no 2011 data so remove that from dates
#NOTE(review): the filter below keeps 2009 and everything strictly after
#2010-12-01, i.e. it removes calendar year 2010 (not 2011) — verify which
#year is actually missing in the source files.
Date1 = as.Date("2010-12-01")
Date2 = as.Date("2010-01-01")
Dates = filter(Dates, Dates$Date > Date1 | Dates$Date < Date2)
#bind dates with As
As = cbind(As, Dates)
#remove old date months and replace with new date column and swap so the date column is first
As$X__1 = NULL
As = As[,c(2,1)]
#BaP (benzo(a)pyrene)
BaP_files = dir(pattern = "BaP")
BaP_list = lapply(BaP_files, read_xlsx)
BaP = do.call(rbind, BaP_list)
#clean headers
BaP = BaP[- grep("ng/m3", BaP$`B(a)P`),]
#same dates as As, add the date column created for that
BaP = cbind(BaP, Dates)
#remove old date months and replace with new date column and swap so the date column is first
BaP$X__1 = NULL
BaP = BaP[,c(2,1)]
#Cadmium, Cd import and clean header data
Cd_files = dir(pattern = "Cd")
Cd_list = lapply(Cd_files, read_xlsx)
Cd = do.call(rbind, Cd_list)
#Clean old headers in the data
Cd = Cd[- grep("ng/m3", Cd$Cd),]
#same dates as As, add the date column created for that
Cd = cbind(Cd, Dates)
#remove old date months and replace with new date column and swap so the date column is first
Cd$X__1 = NULL
Cd = Cd[,c(2,1)]
#Ni
#Nickel, import Ni data and remove header
Ni_files = dir(pattern = "Ni")
Ni_list = lapply(Ni_files, read_xlsx)
Ni = do.call(rbind, Ni_list)
#had to remove rows manually, the search function wouldn't work for some reason
Ni = Ni[- grep("ng/m3", Ni$Ni),]
#same dates as As, add the date column created for that
Ni = cbind(Ni, Dates)
#remove old date months and replace with new date column and swap so the date column is first
Ni$X__1 = NULL
Ni = Ni[,c(2,1)]
#Pb
#import lead data, Pb, and remove the headers
Pb_files = dir(pattern = "Pb")
Pb_list = lapply(Pb_files, read_xlsx)
Pb = do.call(rbind, Pb_list)
#clean old headers from the data
Pb = Pb[- grep("ng/m3", Pb$Pb),]
#same dates as As, add the date column created for that
Pb = cbind(Pb, Dates)
#remove old date months and replace with new date column and swap so the date column is first
Pb$X__1 = NULL
Pb = Pb[,c(2,1)]
#merge monthly data for WinetavernSt on the shared Date column
Dublin_WinetavernSt_As_BaP_Cd_Ni_Pb = Reduce(function(x, y) merge(x, y, all=TRUE), list(As,BaP,Cd,Ni,Pb))
#remove the day from the date column (keep year-month only)
Dublin_WinetavernSt_As_BaP_Cd_Ni_Pb$Date = format(Dublin_WinetavernSt_As_BaP_Cd_Ni_Pb$Date, format="%Y-%m")
#save the gathered data
write_csv(Dublin_WinetavernSt_As_BaP_Cd_Ni_Pb, "../Gathered_Data/Dublin_WinetavernSt_As_BaP_Cd_Ni_Pb_monthly.csv")
#clean the environment
rm(list=ls())
#WINETAVERN STREET DAILY ------------------------------------------------------------------------------------
#PM10 — gather xls, txt and xlsx sources, unify, sort, save.
#NOTE(review): in the dir() patterns below the leading "*" and the "(.*)" group
#are ill-formed/redundant regex; "PM10.*xls$" etc. would express the intent.
#xls files
PM10_files = dir(pattern = "*PM10.*(.*)xls$")
PM10_list = lapply(PM10_files, read_xls)
PM10_xls = do.call(rbind, PM10_list)
#txt
PM10_files = dir(pattern = "*PM10.*(.*)txt$")
PM10_list = lapply(PM10_files, read_table2)
PM10_txt = do.call(rbind, PM10_list)
colnames(PM10_txt)[2] = "PM10"
#change the date format for binding (txt files mix day-first and year-first dates)
PM10_txt$Date = parse_date_time(PM10_txt$Date, c('dmy', 'ymd'))
#xlsx
PM10_files = dir(pattern = "*PM10.*(.*)xlsx$")
PM10_list = lapply(PM10_files, read_xlsx)
PM10_xlsx = do.call(rbind, PM10_list)
#bind the PM10 data
PM10 = rbind(PM10_xls, PM10_txt ,PM10_xlsx)
#sort the data by date
PM10 = PM10[order(as.Date(PM10$Date, format="%Y/%m/%d")),]
#clean the data of headers
#NOTE(review): x[-grep(...), ] drops ALL rows when grep() finds no match
PM10 = PM10[- grep("ug/m3", PM10$PM10),]
#remove hours from data
PM10$Date = as.Date(PM10$Date,format='%Y-%m-%d %H')
#save the gathered data
write_csv(PM10, "../Gathered_Data/Dublin_WinetavernSt_PM10_daily.csv")
#clean the environment
rm(list=ls())
#WINETAVERN STREET HOURLY ------------------------------------------------------------------------------------
#CO — gather xls, txt, ppm-xlsx (converted to mg/m3) and mgm3-xlsx sources.
#xls files
CO_files = dir(pattern = "*CO.*(.*)xls$")
CO_list = lapply(CO_files, read_xls)
#rename columns for binding: older files label the hour column "Hour";
#newer EPA files use "Time"
CO_list <- lapply(CO_list, function(x) setNames(x, gsub("^Hour$", "Time", names(x))) )
CO_xls = do.call(rbind, CO_list)
#txt
CO_files = dir(pattern = "*CO*(.*)txt$")
CO_list = lapply(CO_files, read_table2)
CO_txt = do.call(rbind, CO_list)
colnames(CO_txt)[3] = "CO"
#change the date format for binding (txt files mix day-first and year-first dates)
CO_txt$Date = parse_date_time(CO_txt$Date, c('dmy', 'ymd'))
#xlsx ppm file import and conversion
CO_files = dir(pattern = "CO.*ppm")
CO_list = lapply(CO_files, read_xlsx)
CO_ppm = do.call(rbind, CO_list)
#clear old headers
#NOTE(review): x[-grep(...), ] drops ALL rows when grep() finds no match
CO_ppm = CO_ppm[- grep("ppm", CO_ppm$CO ),]
#convert ppm to mg/m3. CO molecular weight is 28. convert from a string to numerical values first
CO_ppm$CO = as.numeric(CO_ppm$CO)
CO_ppm$CO = CO_ppm$CO * (28/22.41)
#xlsx mgm3 file import
CO_files = dir(pattern = "CO.*mgm3")
CO_list = lapply(CO_files, read_xlsx)
CO_mgm3 = do.call(rbind, CO_list)
#clear old headers
CO_mgm3 = CO_mgm3[- grep("mg/m3", CO_mgm3$CO ),]
#bind the CO datasets
CO_final = rbind(CO_xls, CO_txt, CO_ppm, CO_mgm3)
#clear old headers that may remain after the bind
CO_final = CO_final[- grep("mg/m3", CO_final$CO ),]
#Combine the date and the time for the different pollutants to help with graphing
CO_final$Date <- with(CO_final, as.POSIXct(paste(CO_final$Date, CO_final$Time), format="%Y-%m-%d %H"))
CO_final$Time = NULL
CO = CO_final
#plot(CO, type = "l")
#NOX — gather ppb (converted to ug/m3) and native ug/m3 sources.
#NOX ppb files import
ppb_NOx = dir(pattern = "NOx.*ppb.*(.*)xls$")
NOx_list = lapply(ppb_NOx, read_xls)
#rename columns for binding: older files use "Hour"/"Nox"/"NOX" headings
NOx_list <- lapply(NOx_list, function(x) setNames(x, gsub("^Hour$", "Time", names(x))) )
NOx_list <- lapply(NOx_list, function(x) setNames(x, gsub("^Nox$", "NOx", names(x))) )
NOx_list <- lapply(NOx_list, function(x) setNames(x, gsub("^NOX$", "NOx", names(x))) )
#bind the data
NOx_ppb = do.call(rbind, NOx_list)
#import ppb xlsx file
NOx_files = dir(pattern = "NOx.*ppb.*(.*)xlsx$")
NOx_list = lapply(NOx_files, read_xlsx)
NOx_ppb_xlsx = do.call(rbind, NOx_list)
#bind ppb dataframes
NOx_ppb = rbind(NOx_ppb, NOx_ppb_xlsx)
#clear rows with ppb written in them
#NOTE(review): x[-grep(...), ] drops ALL rows when grep() finds no match
NOx_ppb = NOx_ppb[- grep("ppb", NOx_ppb$NO2),]
#convert all the NO columns from strings to numerical values for calculations
NOx_ppb[, 3:5] <- sapply(NOx_ppb[, 3:5], as.numeric)
#is.numeric(NOx_ppb$NO2)
#convert from ppb to ug/m3: ppb * molecular weight / 22.41
#(NOx and NO2 use MW 46; NO uses MW 30)
NOx_ppb$NOx = NOx_ppb$NOx * (46/22.41)
NOx_ppb$NO = NOx_ppb$NO * (30/22.41)
NOx_ppb$NO2 = NOx_ppb$NO2 * (46/22.41)
#import ugm3 xls file ("xls$" is end-anchored so xlsx files are excluded)
NOx_files = dir(pattern = "NOx.*ugm3(.*)xls$")
NOx_list = lapply(NOx_files, read_xls)
NOx_ugm3_xls = do.call(rbind, NOx_list)
#change hour column to Time
colnames(NOx_ugm3_xls)[2] = "Time"
#import ugm3 xlsx file
NOx_files = dir(pattern = "NOx.*ugm3(.*)xlsx$")
NOx_list = lapply(NOx_files, read_xlsx)
NOx_ugm3_xlsx = do.call(rbind, NOx_list)
#xls file has only 3 columns so need to use rbind.fill in the plyr library
#(plyr is already attached at the top of the script; kept for standalone runs)
library(plyr)
#bind all NOx data, filling missing columns with NA
NOx_final = rbind.fill(NOx_ppb, NOx_ugm3_xls, NOx_ugm3_xlsx)
#clear rows with the ug/m3 unit heading written in them
NOx_final = NOx_final[- grep("ug/m3", NOx_final$NO2),]
#Combine the date and the time for the different pollutants to help with graphing
NOx_final$Date <- with(NOx_final, as.POSIXct(paste(NOx_final$Date, NOx_final$Time), format="%Y-%m-%d %H"))
NOx_final$Time = NULL
#sort dataframe by time
NOx_final = NOx_final[order(as.Date(NOx_final$Date, format="%Y/%m/%d")),]
NOx = NOx_final
#SO2 — gather ppb (converted to ug/m3), xls, txt and xlsx sources, then merge
#CO + NOx + SO2 into one hourly frame for Winetavern St.
#search for ppb SO2 files
SO2_ppb_files = dir(pattern = "SO2.*ppb|ppb.*SO2")
SO2_ppb_list = lapply(SO2_ppb_files, read_xlsx)
SO2_ppb = do.call(rbind, SO2_ppb_list)
#convert SO2 ppb data to ug/m3 data for consistency (MW 64)
#NOTE(review): header rows are not removed first here, so as.numeric() turns
#them into NA with a coercion warning — they are filtered later via drop_na/grep
SO2_ppb$SO2 = as.numeric(SO2_ppb$SO2)
SO2_ppb$SO2 = SO2_ppb$SO2 * (64/22.41)
#search for xls SO2 files
SO2_files = dir(pattern = "SO2.*(.*)xls$")
SO2_list = lapply(SO2_files, read_xls)
#rename columns for binding ("Hour" -> "Time")
SO2_list <- lapply(SO2_list, function(x) setNames(x, gsub("^Hour$", "Time", names(x))))
#bind the files
SO2_xls = do.call(rbind, SO2_list)
#search for txt SO2 files
SO2_files = dir(pattern = "SO2.*(.*)txt$")
SO2_list = lapply(SO2_files, read_table2)
SO2_txt = do.call(rbind, SO2_list)
#rename column for binding
colnames(SO2_txt)[3] = "SO2"
#change the date format for binding
SO2_txt$Date = parse_date_time(SO2_txt$Date, c('dmy', 'ymd'))
SO2_txt$Date = as.Date(SO2_txt$Date)
#is.Date(SO2_txt$Date)
#search for xlsx SO2 files
SO2_files = dir(pattern = "SO2.*ugm3(.*)xlsx$")
SO2_list = lapply(SO2_files, read_xlsx)
SO2_xlsx = do.call(rbind, SO2_list)
#bind all the SO2 data
SO2_final = rbind(SO2_ppb, SO2_xls, SO2_txt, SO2_xlsx)
#sort dataframe by time
SO2_final = SO2_final[order(as.Date(SO2_final$Date, format="%Y/%m/%d")),]
#Combine the date and the time for the different pollutants to help with graphing
SO2_final$Date <- with(SO2_final, as.POSIXct(paste(SO2_final$Date, SO2_final$Time), format="%Y-%m-%d %H"))
SO2_final$Time = NULL
SO2 = SO2_final
#clear old headers
#NOTE(review): x[-grep(...), ] drops ALL rows when grep() finds no match
SO2 = SO2[- grep("ug/m3", SO2$SO2 ),]
#remove Date NA's due to issues when merging
SO2 = SO2 %>% drop_na(Date)
CO = CO %>% drop_na(Date)
NOx = NOx %>% drop_na(Date)
#merge hourly data for WinetavernSt on the Date column
Dublin_WinetavernSt_CO_NOx_SO2 = Reduce(function(x, y) merge(x, y, all=TRUE), list(CO, NOx, SO2))
#save the gathered data
write_csv(Dublin_WinetavernSt_CO_NOx_SO2, "../Gathered_Data/Dublin_WinetavernSt_CO_NOx_SO2_hr.csv")
#Convert to daily values
#calculate min/max/mean for the data
#change from string to numeric values
Dublin_WinetavernSt_CO_NOx_SO2$NOx = as.numeric(Dublin_WinetavernSt_CO_NOx_SO2$NOx)
Dublin_WinetavernSt_CO_NOx_SO2$NO = as.numeric(Dublin_WinetavernSt_CO_NOx_SO2$NO)
Dublin_WinetavernSt_CO_NOx_SO2$NO2 = as.numeric(Dublin_WinetavernSt_CO_NOx_SO2$NO2)
Dublin_WinetavernSt_CO_NOx_SO2$CO = as.numeric(Dublin_WinetavernSt_CO_NOx_SO2$CO)
Dublin_WinetavernSt_CO_NOx_SO2$SO2 = as.numeric(Dublin_WinetavernSt_CO_NOx_SO2$SO2)
#na.rm=F: any missing hour makes that day's statistic NA — presumably deliberate; confirm.
#The gsub renames rely on ordering: NOx first, then end-anchored "NO$", then NO2;
#"CO" and "SO2" do not collide with the already-renamed columns.
mean = aggregate(Dublin_WinetavernSt_CO_NOx_SO2[names(Dublin_WinetavernSt_CO_NOx_SO2)!='Date'], list(hour=cut(Dublin_WinetavernSt_CO_NOx_SO2$Date,'day')), mean, na.rm=F)
names(mean) <- gsub("NOx", "NOx_Mean", names(mean))
names(mean) <- gsub("NO$", "NO_Mean", names(mean))
names(mean) <- gsub("NO2", "NO2_Mean", names(mean))
names(mean) <- gsub("CO", "CO_Mean", names(mean))
names(mean) <- gsub("SO2", "SO2_Mean", names(mean))
min = aggregate(Dublin_WinetavernSt_CO_NOx_SO2[names(Dublin_WinetavernSt_CO_NOx_SO2)!='Date'], list(hour=cut(Dublin_WinetavernSt_CO_NOx_SO2$Date,'day')), min, na.rm=F)
names(min) <- gsub("NOx", "NOx_Min", names(min))
names(min) <- gsub("NO$", "NO_Min", names(min))
names(min) <- gsub("NO2", "NO2_Min", names(min))
names(min) <- gsub("CO", "CO_Min", names(min))
names(min) <- gsub("SO2", "SO2_Min", names(min))
max = aggregate(Dublin_WinetavernSt_CO_NOx_SO2[names(Dublin_WinetavernSt_CO_NOx_SO2)!='Date'], list(hour=cut(Dublin_WinetavernSt_CO_NOx_SO2$Date,'day')), max, na.rm=F)
names(max) <- gsub("NOx", "NOx_Max", names(max))
names(max) <- gsub("NO$", "NO_Max", names(max))
names(max) <- gsub("NO2", "NO2_Max", names(max))
names(max) <- gsub("CO", "CO_Max", names(max))
names(max) <- gsub("SO2", "SO2_Max", names(max))
#merge the daily mean/min/max frames on the day column
Dublin_WinetavernSt_CO_NOx_SO2_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(mean,min,max))
#change column name to date
colnames(Dublin_WinetavernSt_CO_NOx_SO2_hr_MMM_daily)[1] = "Date"
#remove hours from data
Dublin_WinetavernSt_CO_NOx_SO2_hr_MMM_daily$Date = as.Date(Dublin_WinetavernSt_CO_NOx_SO2_hr_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted hourly data
write_csv(Dublin_WinetavernSt_CO_NOx_SO2_hr_MMM_daily, "../Gathered_Data/Dublin_WinetavernSt_CO_NOx_SO2_hr_MMM_daily.csv")
#clean the environment
rm(list=ls())
#WOODQUAY DAILY --------------------------------------------------------------------------------
setwd('..')
setwd("Woodquay/")
#import PM10 xls files
PM10_files = dir(pattern = "PM10.*xls$")
PM10_list = lapply(PM10_files, read_xls)
PM10_xls = do.call(rbind, PM10_list)
#import PM10 txt files
PM10_files = dir(pattern = "PM10.*txt$")
PM10_list = lapply(PM10_files, read_table2)
PM10_txt = do.call(rbind, PM10_list)
#make sure R recognises Date column from txt files as a date column for proper binding
PM10_txt$Date = as.Date(PM10_txt$Date, format="%d/%m/%Y")
#change column name for binding
colnames(PM10_txt)[2] = "PM10"
#bind all PM10 data
PM10 = rbind(PM10_xls, PM10_txt)
#clean headers from the data
#NOTE(review): x[-grep(...), ] drops ALL rows when grep() finds no match
PM10 = PM10[- grep("ug/m3", PM10$PM10),]
#save the gathered data
write_csv(PM10, "../Gathered_Data/Dublin_Woodquay_PM10_daily.csv")
#WOODQUAY HOURLY --------------------------------------------------------------------------------
#import the Benzene file for this region
Dublin_Woodquay_Benzene_2001 <- read_excel("Dublin_Woodquay_Benzene_2001.xls")
#clean headers from the data
Dublin_Woodquay_Benzene_2001 = Dublin_Woodquay_Benzene_2001[- grep("ug/m3", Dublin_Woodquay_Benzene_2001$Benzene),]
#not enough data to save and use
#clean the environment
rm(list=ls())
| /DublinAQData.R | no_license | gklen/airquality | R | false | false | 111,375 | r | #Greg Kelly
#libraries used in this script
library(tidyverse)
library(readxl)
library(readr)
library(lubridate)
library(rio)
library(dplyr)
library(plyr)
#change the working directory setwd('..') brings you out of that folder, up one directory level
#getwd()
#setwd("DublinAQData/")
#read in all files from a directory
#files <- list.files()
# ASHTOWNGROVE DAILY ----
setwd("AshtownGrove/")
#read in the two yearly PM10 text files
Dublin_AshtownGrove_PM10_1996 <- read_table2("Dublin_AshtownGrove_PM10_1996.txt")
Dublin_AshtownGrove_PM10_1997 <- read_table2("Dublin_AshtownGrove_PM10_1997.txt")
#bind the 2 datasets
PM10 = rbind(Dublin_AshtownGrove_PM10_1996, Dublin_AshtownGrove_PM10_1997)
#remove potential header rows left over after binding.
#BUG FIX: was PM10[-grep(...), ]; with zero matches grep() returns integer(0)
#and x[-integer(0), ] silently drops EVERY row. !grepl() is a no-op then.
PM10 = PM10[!grepl("PM10", PM10$`PM10(ug/m3)`), ]
#lubridate is already attached at the top of the script; kept for standalone runs
library(lubridate)
#check if R sees it as a date
#sapply(PM10$Date, is.Date)
#set date column as a Date (source files use day/month/year)
PM10$Date <- as.Date(PM10$Date, format="%d/%m/%Y")
#quick look at the plot
#plot(PM10)
#save the output into the Gathered_Data folder in the main project directory
write_csv(PM10, "../Gathered_Data/Dublin_AshtownGrove_PM10_ugm3_daily.csv")
#clean the environment
rm(list=ls())
#BALBRIGGAN DAILY--------------------------------------------------------------------------------
#change directory to balbriggan, change into the daily folder
setwd('..')
setwd("Balbriggan/")
setwd("daily/")
#search the directory and group files by the type of pollutant
benzene_files = dir(pattern = "*Benzene")
PM10_files = dir(pattern = "*PM10")
toluene_files = dir(pattern = "*Toluene")
#bind the PM10 files chronologically
PM10_list = lapply(PM10_files, read_xlsx)
Balbriggan_PM10 = do.call(rbind, PM10_list)
#remove rows with headings rather than values in them after the row bind
#NOTE(review): x[-grep(...), ] drops ALL rows when grep() finds no match
Balbriggan_PM10 = Balbriggan_PM10[- grep("ug/m3", Balbriggan_PM10$PM10),]
#check the plot
#plot(Balbriggan_PM10, type = "o")
#balbriggan benzene
benzene_list = lapply(benzene_files, read_xlsx)
Balbriggan_Benzene = do.call(rbind, benzene_list)
#balbriggan toluene
toluene_list = lapply(toluene_files , read_xlsx)
Balbriggan_Toluene = do.call(rbind, toluene_list)
#merge the three pollutants on the Date column
Balbriggan_daily = merge(Balbriggan_PM10, Balbriggan_Benzene, by = "Date", all = TRUE)
Balbriggan_daily = merge(Balbriggan_daily, Balbriggan_Toluene, by = "Date", all = TRUE)
#remove rows of data that could be headers
Balbriggan_daily = Balbriggan_daily[- grep("ug/m3", Balbriggan_daily$Benzene ),]
#remove hours from data
Balbriggan_daily$Date = as.Date(Balbriggan_daily$Date,format='%Y-%m-%d %H')
#save file to the gathered-data folder
write_csv(Balbriggan_daily, "../../Gathered_Data/Dublin_Balbriggan_Benzene_Toluene_PM10_daily.csv")
#clean the environment
rm(list=ls())
#BALBRIGGAN HOURLY------------------------------------------
setwd("../../Balbriggan/")
setwd("hourly/")
#search the directory and group files by the type of pollutant
#NOTE(review): benzene and toluene hourly files are listed/read here but never
#merged or saved below — confirm whether that is intentional.
benzene_files = dir(pattern = "*Benzene")
CO_files = dir(pattern = "*CO")
NOx_files = dir(pattern = "*NOx")
SO2_files = dir(pattern = "*SO2")
toluene_files = dir(pattern = "*Toluene")
#bind benzene files together
benzene_list = lapply(benzene_files, read_xlsx)
Balbriggan_Benzene = do.call(rbind, benzene_list)
#bind the CO files chronologically
CO_list = lapply(CO_files, read_xlsx)
Balbriggan_CO = do.call(rbind, CO_list)
#balbriggan NOx
NOx_list = lapply(NOx_files , read_xlsx)
Balbriggan_NOx = do.call(rbind, NOx_list)
#balbriggan SO2
SO2_list = lapply(SO2_files , read_xlsx)
Balbriggan_SO2 = do.call(rbind, SO2_list)
#remove rows with headings rather than values in them after the row bind
#NOTE(review): x[-grep(...), ] drops ALL rows when grep() finds no match
Balbriggan_CO = Balbriggan_CO[- grep("mg/m3", Balbriggan_CO$CO),]
Balbriggan_NOx = Balbriggan_NOx[- grep("ug/m3", Balbriggan_NOx$NOx),]
Balbriggan_SO2 = Balbriggan_SO2[- grep("ug/m3", Balbriggan_SO2$SO2),]
#Combine the date and the time for the different pollutants to help with graphing
Balbriggan_CO$Date <- with(Balbriggan_CO, as.POSIXct(paste(Balbriggan_CO$Date, Balbriggan_CO$Time), format="%Y-%m-%d %H"))
Balbriggan_CO$Time = NULL
Balbriggan_NOx$Date <- with(Balbriggan_NOx, as.POSIXct(paste(Balbriggan_NOx$Date, Balbriggan_NOx$Time), format="%Y-%m-%d %H"))
Balbriggan_NOx$Time = NULL
Balbriggan_SO2$Date <- with(Balbriggan_SO2, as.POSIXct(paste(Balbriggan_SO2$Date, Balbriggan_SO2$Time), format="%Y-%m-%d %H"))
Balbriggan_SO2$Time = NULL
#check the plot
#plot(Balbriggan_CO)
#calculate daily min, max and mean from the hourly data
#na.rm=F throughout: any missing hour makes that day's statistic NA —
#presumably deliberate; confirm.
#CO
Balbriggan_CO$CO = as.numeric(Balbriggan_CO$CO)
mean = aggregate(Balbriggan_CO[names(Balbriggan_CO)!='Date'], list(hour=cut(Balbriggan_CO$Date,'day')), mean, na.rm=F)
colnames(mean)[2] = "CO_Mean"
min = aggregate(Balbriggan_CO[names(Balbriggan_CO)!='Date'], list(hour=cut(Balbriggan_CO$Date,'day')), min, na.rm=F)
colnames(min)[2] = "CO_Min"
max = aggregate(Balbriggan_CO[names(Balbriggan_CO)!='Date'], list(hour=cut(Balbriggan_CO$Date,'day')), max, na.rm=F)
colnames(max)[2] = "CO_Max"
Balbriggan_CO_NOx_SO2_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(mean,min,max))
#NOx
Balbriggan_NOx$NOx = as.numeric(Balbriggan_NOx$NOx)
Balbriggan_NOx$NO = as.numeric(Balbriggan_NOx$NO)
Balbriggan_NOx$NO2 = as.numeric(Balbriggan_NOx$NO2)
mean = aggregate(Balbriggan_NOx[names(Balbriggan_NOx)!='Date'], list(hour=cut(Balbriggan_NOx$Date,'day')), mean, na.rm=F)
colnames(mean)[2] = "NOx_Mean"
colnames(mean)[3] = "NO_Mean"
colnames(mean)[4] = "NO2_Mean"
min = aggregate(Balbriggan_NOx[names(Balbriggan_NOx)!='Date'], list(hour=cut(Balbriggan_NOx$Date,'day')), min, na.rm=F)
colnames(min)[2] = "NOx_Min"
colnames(min)[3] = "NO_Min"
colnames(min)[4] = "NO2_Min"
max = aggregate(Balbriggan_NOx[names(Balbriggan_NOx)!='Date'], list(hour=cut(Balbriggan_NOx$Date,'day')), max, na.rm=F)
colnames(max)[2] = "NOx_Max"
colnames(max)[3] = "NO_Max"
colnames(max)[4] = "NO2_Max"
#merge the NOx statistics into the frame holding the CO statistics
Balbriggan_CO_NOx_SO2_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(Balbriggan_CO_NOx_SO2_hr_MMM_daily,mean,min,max))
#SO2
Balbriggan_SO2$SO2 = as.numeric(Balbriggan_SO2$SO2)
mean = aggregate(Balbriggan_SO2[names(Balbriggan_SO2)!='Date'], list(hour=cut(Balbriggan_SO2$Date,'day')), mean, na.rm=F)
colnames(mean)[2] = "SO2_Mean"
min = aggregate(Balbriggan_SO2[names(Balbriggan_SO2)!='Date'], list(hour=cut(Balbriggan_SO2$Date,'day')), min, na.rm=F)
colnames(min)[2] = "SO2_Min"
max = aggregate(Balbriggan_SO2[names(Balbriggan_SO2)!='Date'], list(hour=cut(Balbriggan_SO2$Date,'day')), max, na.rm=F)
colnames(max)[2] = "SO2_Max"
#merge the SO2 statistics with the existing data
Balbriggan_CO_NOx_SO2_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(Balbriggan_CO_NOx_SO2_hr_MMM_daily,mean,min,max))
#change column name to date
colnames(Balbriggan_CO_NOx_SO2_hr_MMM_daily)[1] = "Date"
#remove hours from data
Balbriggan_CO_NOx_SO2_hr_MMM_daily$Date = as.Date(Balbriggan_CO_NOx_SO2_hr_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted hourly data
write_csv(Balbriggan_CO_NOx_SO2_hr_MMM_daily, "../../Gathered_Data/Dublin_Balbriggan_CO_NOx_SO2_hr_MMM_daily.csv")
#merge the hourly CO/NOx/SO2 data on the Date column
Balbriggan_hourly = merge(Balbriggan_CO, Balbriggan_NOx, by = "Date", all = TRUE)
Balbriggan_hourly = merge(Balbriggan_hourly, Balbriggan_SO2, by = "Date", all = TRUE)
#write out as a csv file into the gathered-data directory
write_csv(Balbriggan_hourly, "../../Gathered_Data/Dublin_Balbriggan_CO_NOx_SO2_hr.csv")
#clean the environment
rm(list=ls())
#BALLYFERMOT DAILY--------------------------------------------------------------------------------
setwd("../../Ballyfermot/")
setwd("daily/")
#daily files were manually converted from xls/txt to xlsx in Excel.
#FIX: pattern was "*PM10" — a leading "*" quantifier is ill-formed regex;
#the plain substring "PM10" matches the same files.
PM10_files = dir(pattern = "PM10")
#bind the PM10 files chronologically
PM10_list = lapply(PM10_files, read_xlsx)
PM10 = do.call(rbind, PM10_list)
#remove embedded header rows.
#BUG FIX: was PM10[-grep(...), ]; with zero matches grep() returns integer(0)
#and x[-integer(0), ] silently drops EVERY row. !grepl() is a no-op then.
PM10 = PM10[!grepl("ug/m3", PM10$PM10), ]
#plot(PM10)
#remove hours from data
PM10$Date = as.Date(PM10$Date,format='%Y-%m-%d %H')
#save the data
write_csv(PM10, "../../Gathered_Data/Dublin_Ballyfermot_PM10_ugm3_daily.csv")
#clean the environment
rm(list=ls())
#BALLYFERMOT HOURLY--------------------------------------------------------------------------------
#NOTE(review): after the rm() above, the working directory is Ballyfermot/daily;
#setwd("Ballyfermot/") here looks wrong (it expects to start from the parent
#directory) — probably should be setwd("../hourly/"). Verify the folder layout.
setwd("Ballyfermot/")
setwd("hourly/")
#using the library rio to convert txt file types to .csv file types
#already converted — commenting out code for testing
# library(rio)
# txt <- dir(pattern = "txt")
# created <- mapply(convert, txt, gsub("txt", "csv", txt))
# unlink(txt) # delete txt files
#
# #change xlsx files to csv
# xlsx <- dir(pattern = "xlsx")
# created <- mapply(convert, xlsx, gsub("xlsx", "csv", xlsx))
# unlink(xlsx)
#
# #change xls files to csv files
# xls <- dir(pattern = "xls")
# created <- mapply(convert, xls, gsub("xls", "csv", xls))
# unlink(xlsx)
# PM10_files
#search for ppb NOX files using a logical OR pattern
NOx_ppb_files = dir(pattern = "NOx.*ppb|ppb.*NOx")
#read the NOx ppb csv files
NOx_ppb_list = lapply(NOx_ppb_files, read_csv)
NOx_ppb = do.call(rbind, NOx_ppb_list)
#clear rows with ppb written in them
#NOTE(review): x[-grep(...), ] drops ALL rows when grep() finds no match
NOx_ppb = NOx_ppb[- grep("ppb", NOx_ppb$NO2),]
#convert all the NO columns from strings to numerical values for calculations
NOx_ppb[, 3:5] <- sapply(NOx_ppb[, 3:5], as.numeric)
#is.numeric(NOx_ppb$NO2)
#convert NO from ppb to ug/m3. molecular weight is 30. formula is ppb * molecular weight / 22.41
NOx_ppb$NO = NOx_ppb$NO * (30/22.41)
#convert NO2 from ppb to ug/m3. molecular weight is 46.
NOx_ppb$NO2 = NOx_ppb$NO2 * (46/22.41)
#convert NOx from ppb to ug/m3. molecular weight is 46.
NOx_ppb$NOx = NOx_ppb$NOx * (46/22.41)
#search for ugm3 NOX files using a logical OR pattern
NOx_files = dir(pattern = "NOx.*ugm3|ugm3.*NOx")
NOx_list = lapply(NOx_files, read_csv)
NOx = do.call(rbind, NOx_list)
#clear rows with the ug/m3 unit heading written in them
NOx = NOx[- grep("ug/m3", NOx$NO2),]
#combine both types of files into one
NOx = rbind(NOx,NOx_ppb)
#SO2
# Build the Ballyfermot hourly SO2 dataset: read ug/m3 files (normalising a
# legacy "Hour" column name to "Time"), read ppb files, convert ppb -> ug/m3,
# and row-bind the two.
#remove columns created from txt conversion to csv. rename the column to be the same as the ugm3 files. save the file replacing existing one
#completed, commented out so reruns do not rewrite the source file
# Dublin_Ballyfermot_SO2_2006_ugm3_hr <- read_csv("Dublin_Ballyfermot_SO2_2006_ugm3_hr.csv")
# Dublin_Ballyfermot_SO2_2006_ugm3_hr = Dublin_Ballyfermot_SO2_2006_ugm3_hr[,-c(4:6)]
# names(Dublin_Ballyfermot_SO2_2006_ugm3_hr)[3]<-"SO2"
#
# #Combine the date and the time columns
# Dublin_Ballyfermot_SO2_2006_ugm3_hr$Date = as.Date(Dublin_Ballyfermot_SO2_2006_ugm3_hr$Date, format = "%d/%m/%Y" )
# Dublin_Ballyfermot_SO2_2006_ugm3_hr$Date <- with(Dublin_Ballyfermot_SO2_2006_ugm3_hr, as.POSIXct(paste(Dublin_Ballyfermot_SO2_2006_ugm3_hr$Date, Dublin_Ballyfermot_SO2_2006_ugm3_hr$Time), format="%Y-%m-%d %H"))
#
# #Dublin_Ballyfermot_SO2_2006_ugm3_hr$Date = as.Date(Dublin_Ballyfermot_SO2_2006_ugm3_hr$Date,format='%Y-%m-%d %H')
# write_csv(Dublin_Ballyfermot_SO2_2006_ugm3_hr, "Dublin_Ballyfermot_SO2_2006_ugm3_hr.csv")
#search for ugm3 SO2 files using logical OR statement
SO2_files = dir(pattern = "SO2.*ugm3|ugm3.*SO2")
SO2_list = lapply(SO2_files, read_csv)
#rename columns for binding: older files label the hour column "Hour", newer ones "Time"
SO2_list <- lapply(SO2_list, function(x) setNames(x, gsub("^Hour$", "Time", names(x))) )
#bind the SO2 ugm3 data
SO2 = do.call(rbind, SO2_list)
#search for ppb SO2 files using logical OR statement
SO2_ppb_files = dir(pattern = "SO2.*ppb|ppb.*SO2")
SO2_ppb_list = lapply(SO2_ppb_files, read_csv)
SO2_ppb = do.call(rbind, SO2_ppb_list)
#convert SO2 ppb data to ugm3 data for consistency (molecular weight 64;
#ug/m3 = ppb x MW / 22.41). as.numeric() turns any stray header rows into NA.
SO2_ppb$SO2 = as.numeric(SO2_ppb$SO2)
SO2_ppb$SO2 = SO2_ppb$SO2 * (64/22.41)
#bind SO2 data
SO2 = rbind(SO2, SO2_ppb)
#clean possible headings — left disabled; NOTE(review): if re-enabled and no
#row matches, the negative integer(0) subscript would drop all rows.
#SO2 = SO2[- grep("ug/m3", SO2$SO2),]
#SO2 = SO2[- grep("ppb", SO2$SO2),]
#merge the SO2 and NOx data files for Ballyfermot (inner join on the shared
#Date/Time columns, so only hours present in both datasets are kept)
Ballyfermot_NOx_SO2_hr = merge(NOx,SO2)
#Combine the date and the time columns into one POSIXct timestamp to help
#with graphing. BUG FIX: the original ran this conversion twice back to back;
#the second pass re-parsed the already-converted value ("%Y-%m-%d %H" stops
#at the hour, ignoring the appended Time), so it was a no-op and is removed.
Ballyfermot_NOx_SO2_hr$Date <- with(Ballyfermot_NOx_SO2_hr, as.POSIXct(paste(Ballyfermot_NOx_SO2_hr$Date, Ballyfermot_NOx_SO2_hr$Time), format="%Y-%m-%d %H"))
Ballyfermot_NOx_SO2_hr$Time = NULL
#save the merged hourly data
write_csv(Ballyfermot_NOx_SO2_hr, "../../Gathered_Data/Dublin_Ballyfermot_NOx_SO2_hr.csv")
#calculate min, max and mean for hourly data
#NOx
NOx$NOx = as.numeric(NOx$NOx)
NOx$NO = as.numeric(NOx$NO)
NOx$NO2 = as.numeric(NOx$NO2)
#atomic vector error workaround: mutate a copy, then replace the original
NOx_ave = NOx
#Combine the date and the time into one POSIXct timestamp
NOx_ave$Date <- with(NOx_ave, as.POSIXct(paste(NOx_ave$Date, NOx_ave$Time), format="%Y-%m-%d %H"))
NOx_ave$Time = NULL
NOx = NOx_ave
#daily aggregates. NOTE(review): `mean`, `min` and `max` shadow the base R
#functions of the same name until rm(list=ls()); the FUN arguments still
#resolve to the base functions. na.rm=F keeps NA for days with missing hours.
#The gsub order below matters: "NOx" first (so "NO$" no longer matches it),
#then the exact-suffix "NO$", then "NO2".
mean = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), mean, na.rm=F)
names(mean) <- gsub("NOx", "NOx_Mean", names(mean))
names(mean) <- gsub("NO$", "NO_Mean", names(mean))
names(mean) <- gsub("NO2", "NO2_Mean", names(mean))
min = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), min, na.rm=F)
names(min) <- gsub("NOx", "NOx_Min", names(min))
names(min) <- gsub("NO$", "NO_Min", names(min))
names(min) <- gsub("NO2", "NO2_Min", names(min))
max = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), max, na.rm=F)
names(max) <- gsub("NOx", "NOx_Max", names(max))
names(max) <- gsub("NO$", "NO_Max", names(max))
names(max) <- gsub("NO2", "NO2_Max", names(max))
#merge the mean, min and max summaries into one daily frame
#(original comment said "with the CO data" — there is no CO at this station)
Ballyfermot_NOx_SO2_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(mean,min,max))
#SO2
#BUG FIX: the original aggregated `Balbriggan_SO2`, a leftover object name
#from another station's section that cannot exist here (the environment was
#cleared with rm(list=ls()) at the top of this station's processing and
#Balbriggan_SO2 is never defined afterwards). It is replaced with the
#Ballyfermot `SO2` data built earlier in this section.
#combine date and hour columns first (same copy-then-replace workaround as
#the NOx block above), since aggregate/cut need a real date-time column
SO2_ave = SO2
SO2_ave$Date <- with(SO2_ave, as.POSIXct(paste(SO2_ave$Date, SO2_ave$Time), format="%Y-%m-%d %H"))
SO2_ave$Time = NULL
SO2 = SO2_ave
#convert SO2 readings from strings to numeric for the aggregation
SO2$SO2 = as.numeric(SO2$SO2)
#daily mean/min/max (na.rm=F keeps NA for any day containing missing hours)
mean = aggregate(SO2[names(SO2)!='Date'], list(hour=cut(SO2$Date,'day')), mean, na.rm=F)
colnames(mean)[2] = "SO2_Mean"
min = aggregate(SO2[names(SO2)!='Date'], list(hour=cut(SO2$Date,'day')), min, na.rm=F)
colnames(min)[2] = "SO2_Min"
max = aggregate(SO2[names(SO2)!='Date'], list(hour=cut(SO2$Date,'day')), max, na.rm=F)
colnames(max)[2] = "SO2_Max"
#merge the SO2 summaries with the existing NOx summaries
Ballyfermot_NOx_SO2_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(Ballyfermot_NOx_SO2_hr_MMM_daily,mean,min,max))
#change the aggregation key column name back to Date
colnames(Ballyfermot_NOx_SO2_hr_MMM_daily)[1] = "Date"
#drop the hour component so the column holds plain dates
Ballyfermot_NOx_SO2_hr_MMM_daily$Date = as.Date(Ballyfermot_NOx_SO2_hr_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted hourly data
write_csv(Ballyfermot_NOx_SO2_hr_MMM_daily, "../../Gathered_Data/Dublin_Ballyfermot_NOx_SO2_hr_MMM_daily.csv")
#clean the environment before the next station's section
rm(list=ls())
#BLANCHARDSTOWN DAILY--------------------------------------------------------------------------------
# Daily PM10 for Blanchardstown: stack the converted CSVs, strip header rows,
# coerce Date, and save.
setwd('..')
setwd('..')
setwd("Blanchardstown/daily")
#convert xls files to csv files and delete the xls files. convert all of the data types to csv to help import files better
#csv was chosen as R seems to like it and could not find a way to convert an xls file to xlxs
# xls <- dir(pattern = "(.*)xls$")
# created <- mapply(convert, xls, gsub("xls", "csv", xls))
# unlink(xls)
#
# #convert xlsx files to csv files and delete the xlsx files
# xlsx <- dir(pattern = "(.*)xlsx$")
# created <- mapply(convert, xlsx, gsub("xlsx", "csv", xlsx))
# unlink(xlsx)
#search for and bind the PM10 files chronologically
PM10_files = dir(pattern = "*PM10")
PM10_list = lapply(PM10_files, read_csv)
PM10 = do.call(rbind, PM10_list)
#plot(PM10)
#clear rows with ug/m3 written in them (unguarded grep — assumes a match exists)
PM10 = PM10[- grep("ug/m3", PM10$PM10),]
#remove hours from data
PM10$Date = as.Date(PM10$Date,format='%Y-%m-%d %H')
#save the daily data for Blanchardstown
write_csv(PM10, "../../Gathered_Data/Dublin_Blanchardstown_PM10_daily.csv")
#BLANCHARDSTOWN HOURLY--------------------------------------------------------------------------------
setwd("../hourly")
#NOx ppb files conversion
ppb_NOx = dir(pattern = "ppb")
NOx_ppb_list = lapply(ppb_NOx, read_csv)
NOx_ppb = do.call(rbind, NOx_ppb_list)
#clear rows with ppb written in them
NOx_ppb = NOx_ppb[- grep("ppb", NOx_ppb$NO2),]
#convert all the NO columns from strings to numerical values for calculations
NOx_ppb[, 3:5] <- sapply(NOx_ppb[, 3:5], as.numeric)
is.numeric(NOx_ppb$NO2)
#convert NOX from ppb to ugm3. molecular weight is 46 formula is ppb x molecular weight/22.41
NOx_ppb$NOx = NOx_ppb$NOx * (46/22.41)
#convert NO from ppb to ugm3. molecular weight is 30. formula is ppb x molecular weight/22.41
NOx_ppb$NO = NOx_ppb$NO * (30/22.41)
#convert NO2 from ppb to ugm3. molecular weight is 46. formula is ppb x molecular weight/22.41
NOx_ppb$NO2 = NOx_ppb$NO2 * (46/22.41)
#remove the ppb files so the "*NOx" scan below does not re-read them.
# WARNING(review): unlink() permanently DELETES the ppb source CSVs from
# disk, and the converted NOx_ppb frame built above is never bound into NOx
# below (compare the Ballyfermot section, which does rbind(NOx, NOx_ppb)).
# As written, the ppb-era data is lost entirely — verify this is intended.
unlink(ppb_NOx)
#bind the NOx files chronologically
NOx_files = dir(pattern = "*NOx")
NOx_list = lapply(NOx_files, read_csv)
NOx = do.call(rbind, NOx_list)
#clean the data of headings
NOx = NOx[- grep("ug/m3", NOx$NOx),]
#atomic vector error so putting it into a new dataframe
Blanchardstown_NOx_hr = NOx
#combine date and hour into a single POSIXct timestamp
Blanchardstown_NOx_hr$Date <- with(Blanchardstown_NOx_hr, as.POSIXct(paste(Blanchardstown_NOx_hr$Date, Blanchardstown_NOx_hr$Time), format="%Y-%m-%d %H"))
Blanchardstown_NOx_hr$Time = NULL
#save the output (note: write.csv here adds a row-name column, unlike the
#write_csv calls used elsewhere in this script)
write.csv(Blanchardstown_NOx_hr, file = "../../Gathered_Data/Dublin_Blanchardstown_NOx_hr.csv")
#determine mean, max, min for the hourly dataset
NOx = Blanchardstown_NOx_hr
#convert all the NO columns from strings to numerical values for calculations
#(columns 2:4 assumed to be the pollutant columns after Time was dropped)
NOx[, 2:4] <- sapply(NOx[, 2:4], as.numeric)
#daily aggregates; `mean`/`min`/`max` shadow the base functions until rm() below
mean = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), mean, na.rm=F)
names(mean) <- gsub("NOx", "NOx_Mean", names(mean))
names(mean) <- gsub("NO$", "NO_Mean", names(mean))
names(mean) <- gsub("NO2", "NO2_Mean", names(mean))
min = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), min, na.rm=F)
names(min) <- gsub("NOx", "NOx_Min", names(min))
names(min) <- gsub("NO$", "NO_Min", names(min))
names(min) <- gsub("NO2", "NO2_Min", names(min))
max = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), max, na.rm=F)
names(max) <- gsub("NOx", "NOx_Max", names(max))
names(max) <- gsub("NO$", "NO_Max", names(max))
names(max) <- gsub("NO2", "NO2_Max", names(max))
#merge the mean/min/max summaries
Blanchardstown_NOx_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(mean,min,max))
#change column name to date
colnames(Blanchardstown_NOx_MMM_daily)[1] = "Date"
#remove hours from data
Blanchardstown_NOx_MMM_daily$Date = as.Date(Blanchardstown_NOx_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted hourly data
write_csv(Blanchardstown_NOx_MMM_daily, "../../Gathered_Data/Dublin_Blanchardstown_NOx_MMM_daily.csv")
#clean the environment
rm(list=ls())
#CITY COUNCIL --------------------------------------------------------------------------------
# Historical City Council smoke measurements: stack the xls sheets (ragged
# columns, hence rbind.fill from plyr), fix Excel-serial dates, coalesce
# street columns whose headers changed over the years, and tag column names.
setwd('..')
setwd('..')
setwd("CityCouncil/")
#Smoke
#bind the Smoke files
Files = dir(pattern = "Smoke")
List = lapply(Files, read_xls)
smoke = do.call(rbind.fill, List)
#change Location column to Date
colnames(smoke)[1] = "Date"
#need to convert numeric excel date to standard date format
library(janitor)
smoke$Date = excel_numeric_to_date(as.numeric(as.character(smoke$Date)), date_system = "modern")
#merge columns that are duplicated due to header name changes
#(ifelse keeps the first column's value and falls back to the variant)
smoke$BrunswickSt <- ifelse(is.na(smoke$`BRUNSWICK ST`), smoke$`BRUNSWICK ST.`, smoke$`BRUNSWICK ST`)
smoke$`BRUNSWICK ST` = NULL
smoke$`BRUNSWICK ST.`= NULL
smoke$RDS2 <- ifelse(is.na(smoke$R.D.S), smoke$RDS, smoke$R.D.S)
smoke$R.D.S = NULL
smoke$RDS = NULL
smoke$HerbertSt <- ifelse(is.na(smoke$`HERBERT ST`), smoke$`HERBERT ST.` , smoke$`HERBERT ST`)
smoke$`HERBERT ST` = NULL
smoke$`HERBERT ST.` = NULL
smoke$OldCountyRd <- ifelse(is.na(smoke$`OLD COUNTY RD`), smoke$`OLD COUNTRY ROAD` , smoke$`OLD COUNTY RD`)
smoke$`OLD COUNTY RD` = NULL
smoke$`OLD COUNTRY ROAD` = NULL
#count NAs per column (inspection only)
#map(smoke, ~sum(is.na(.)))
#clean the data of heading rows (unguarded grep — assumes a "ugm3" row exists)
smoke = smoke[- grep("ugm3", smoke$OldCountyRd),]
#add a "smoke" suffix to all road names (original comment said "SO2" — copy/paste)
colnames(smoke) <- paste(colnames(smoke), "smoke", sep = "_")
colnames(smoke)[1] = "Date"
#plot(smoke, type = "l")
#SO2
#bind the SO2 bubbler files (original comment said "Pb files" — copy/paste)
Files = dir(pattern = "SO2Bubbler")
List = lapply(Files, read_xls)
SO2 = do.call(rbind.fill, List)
#change Location column to Date
colnames(SO2)[1] = "Date"
#need to convert numeric excel date to standard date format
library(janitor)
SO2$Date = excel_numeric_to_date(as.numeric(as.character(SO2$Date)), date_system = "modern")
#merge columns that are duplicated due to header name changes
SO2$BrunswickSt <- ifelse(is.na(SO2$`BRUNSWICK ST`), SO2$`BRUNSWICK ST.`, SO2$`BRUNSWICK ST`)
SO2$`BRUNSWICK ST` = NULL
SO2$`BRUNSWICK ST.`= NULL
SO2$RDS2 <- ifelse(is.na(SO2$R.D.S), SO2$RDS, SO2$R.D.S)
SO2$R.D.S = NULL
SO2$RDS = NULL
SO2$HerbertSt <- ifelse(is.na(SO2$`HERBERT ST`), SO2$`HERBERT ST.` , SO2$`HERBERT ST`)
SO2$`HERBERT ST` = NULL
SO2$`HERBERT ST.` = NULL
SO2$OldCountyRd <- ifelse(is.na(SO2$`OLD COUNTY RD`), SO2$`OLD COUNTRY ROAD` , SO2$`OLD COUNTY RD`)
SO2$`OLD COUNTY RD` = NULL
SO2$`OLD COUNTRY ROAD` = NULL
#(removed) BUG FIX: the original line `SO2[ , order(names(SO2))]` computed an
#alphabetical column ordering but discarded the result — it was a no-op.
#add SO2 to all road names
colnames(SO2) <- paste(colnames(SO2), "SO2", sep = "_")
colnames(SO2)[1] = "Date"
#clean the data of heading rows. The column is addressed by its full
#post-rename name: the original used `SO2$OldCountyRd` and only worked via
#fragile `$` partial matching against "OldCountyRd_SO2". The removal is also
#guarded: an unmatched grep() returns integer(0), and a negative integer(0)
#subscript would silently drop every row.
hdr = grep("ugm3", SO2$OldCountyRd_SO2)
if (length(hdr) > 0) SO2 = SO2[-hdr,]
#merge the smoke and SO2 datasets (inner join on Date)
Dublin_CityCouncil_Old_Smoke_SO2_daily = merge(smoke, SO2)
#save the data
write.csv(Dublin_CityCouncil_Old_Smoke_SO2_daily, file = "../Gathered_Data/Dublin_CityCouncil_Old_Smoke_SO2_daily.csv")
#CLONSKEAGH HOURLY --------------------------------------------------------------------------------
# Build the Clonskeagh hourly ozone dataset: convert ppb files to ug/m3,
# bind them with the native ug/m3 files, combine Date+Time, clean header
# rows, and save.
setwd('..')
setwd("Clonskeagh/")
#find ppb files
O3_ppb_files = dir(pattern = "O3.*ppb|ppb.*O3")
O3_ppb_list = lapply(O3_ppb_files, read_csv)
O3_ppb = do.call(rbind, O3_ppb_list)
#clear repeated header rows with "ppb" written in them.
#BUG FIX: a bare `x[- grep(...), ]` drops EVERY row when grep() matches
#nothing (negative integer(0) subscript selects zero rows), so each removal
#is now guarded with a length check.
hdr = grep("ppb", O3_ppb$ozone)
if (length(hdr) > 0) O3_ppb = O3_ppb[-hdr,]
#convert the ozone column from strings to numerical values for calculations
O3_ppb[, 3] <- sapply(O3_ppb[,3], as.numeric)
is.numeric(O3_ppb$ozone)
#convert O3 from ppb to ugm3. molecular weight is 48. formula is ppb x molecular weight/22.41
O3_ppb$ozone =O3_ppb$ozone * (48/22.41)
#convert mgm3 to ugm3 for 2008 file (one-off preparation, kept for the record)
#Dublin_Clonskeagh_O3_2008 <- read_csv("Dublin_Clonskeagh_O3_2008.csv")
#Dublin_Clonskeagh_O3_2008$ozone = as.numeric(Dublin_Clonskeagh_O3_2008$ozone)/1000
#convert xls files to csv files and delete the xls files
#steps already completed
# xls <- dir(pattern = "(.*)xls$")
# created <- mapply(convert, xls, gsub("xls", "csv", xls))
# unlink(xls)
#
# #convert xlsx files to csv files and delete the xlsx files
# xlsx <- dir(pattern = "(.*)xlsx$")
# created <- mapply(convert, xlsx, gsub("xlsx", "csv", xlsx))
# unlink(xlsx)
#search for and bind the ug/m3 ozone files
O3_files = dir(pattern = "O3.*ugm3|ugm3.*O3")
O3_list = lapply(O3_files, read_csv)
O3 = do.call(rbind, O3_list)
#combine converted ppb files
O3 = rbind(O3,O3_ppb)
#combine date and hour columns into one POSIXct timestamp
O3$Date <- with(O3, as.POSIXct(paste(O3$Date, O3$Time), format="%Y-%m-%d %H"))
O3$Time = NULL
#remove any remaining unit-header rows. The original three unguarded
#removals would wipe the whole dataset as soon as one pattern (e.g. "mg/m3",
#whose source rows were converted in the commented-out step above) had no
#match; the guards preserve the data in that case.
for (unit in c("ug/m3", "ugm-3", "mg/m3")) {
  hdr = grep(unit, O3$ozone, fixed = TRUE)
  if (length(hdr) > 0) O3 = O3[-hdr,]
}
#see if it looks ok
#plot(O3)
#save the data
write_csv(O3, "../Gathered_Data/Dublin_Clonskeagh_ozone_hr.csv")
#calculate the mean, max and min of ozone per day
O3$ozone = as.numeric(O3$ozone)
#truncate timestamps to plain dates before aggregating
O3$Date = as.Date(O3$Date, format = "%Y-%m-%d")
#`mean`/`min`/`max` shadow the base functions until rm(list=ls()) below
mean = aggregate(O3[names(O3)!='Date'], list(hour=cut(O3$Date,'day')), mean, na.rm=F)
names(mean) <- gsub("ozone", "ozone_Mean", names(mean))
min = aggregate(O3[names(O3)!='Date'], list(hour=cut(O3$Date,'day')), min, na.rm=F)
names(min) <- gsub("ozone", "ozone_Min", names(min))
max = aggregate(O3[names(O3)!='Date'], list(hour=cut(O3$Date,'day')), max, na.rm=F)
names(max) <- gsub("ozone", "ozone_Max", names(max))
#remove hours from data (superseded by the as.Date call further down)
# min$hour = as.Date(min$hour,format='%Y-%m-%d %H')
# max$hour = as.Date(max$hour,format='%Y-%m-%d %H')
# mean$hour = as.Date(mean$hour,format='%Y-%m-%d %H')
#merge the mean/min/max summaries
Clonskeagh_ozone_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(mean,min,max))
#change column name to date
colnames(Clonskeagh_ozone_MMM_daily)[1] = "Date"
#remove hours from data
Clonskeagh_ozone_MMM_daily$Date = as.Date(Clonskeagh_ozone_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted hourly data
write_csv(Clonskeagh_ozone_MMM_daily, "../Gathered_Data/Dublin_Clonskeagh_ozone_MMM_daily.csv")
#clean the environment
rm(list=ls())
#CLONTARF --------------------------------------------------------------------------------
setwd('..')
setwd("Clontarf/")
#import the data using whitespace to separate the columns
Dublin_Clontarf_PM10_ugm3_daily <- read_table2("Dublin_Clontarf_PM10_1996.txt")
#not much data present so it is unusable — nothing is written for this station
#clean the environment
rm(list=ls())
#COLERAINE STREET HOURLY--------------------------------------------------------------------------------
# Hourly CO and NOx for Coleraine Street: read native-unit files, convert the
# ppm/ppb variants, and row-bind each pollutant into a single frame.
setwd('..')
setwd("ColeraineStreet/hourly")
#convert xls files to csv files and delete the xls files (already completed)
# xls <- dir(pattern = "(.*)xls$")
# created <- mapply(convert, xls, gsub("xls", "csv", xls))
# unlink(xls)
#
# #convert xlsx files to csv files and delete the xlsx files
# xlsx <- dir(pattern = "(.*)xlsx$")
# created <- mapply(convert, xlsx, gsub("xlsx", "csv", xlsx))
# unlink(xlsx)
#
# #convert txt files to csv files and delete the txt files
# txt <- dir(pattern = "(.*)txt$")
# created <- mapply(convert, txt, gsub("txt", "csv", txt))
# unlink(txt)
#CO
#search for and bind the CO files chronologically
CO_files = dir(pattern = "CO.*mgm3|mgm3.*CO")
CO_list = lapply(CO_files, read_csv)
CO = do.call(rbind, CO_list)
#clear old header rows (unguarded grep — assumes a "mg/m3" row exists)
CO = CO[- grep("mg/m3", CO$CO ),]
#import the ppm files and convert to mg/m3
CO_ppm_files = dir(pattern = "CO.*ppm|ppm.*CO")
CO_ppm_list = lapply(CO_ppm_files, read_csv)
CO_ppm = do.call(rbind, CO_ppm_list)
#clear old header rows
CO_ppm = CO_ppm[- grep("ppm", CO_ppm$CO ),]
#convert ppm to mgm3. CO molecular weight is 28. convert from a string to numeric first
CO_ppm$CO = as.numeric(CO_ppm$CO)
CO_ppm$CO = CO_ppm$CO * (28/22.41)
#bind the 2 CO files
CO = rbind(CO,CO_ppm)
#NOx
#NOx ppb files conversion
ppb_NOx = dir(pattern = "NOx.*ppb|ppb.*NOx")
NOx_ppb_list = lapply(ppb_NOx, read_csv)
NOx_ppb = do.call(rbind, NOx_ppb_list)
#clear rows with ppb written in them
NOx_ppb = NOx_ppb[- grep("ppb", NOx_ppb$NO2),]
#convert all the NO columns from strings to numerical values for calculations
NOx_ppb[, 3:5] <- sapply(NOx_ppb[, 3:5], as.numeric)
#is.numeric(NOx_ppb$NO2)
#convert NO from ppb to ugm3. molecular weight is 30. formula is ppb x molecular weight/22.41
NOx_ppb$NO = NOx_ppb$NO * (30/22.41)
#convert NO2 from ppb to ugm3. molecular weight is 46. formula is ppb x molecular weight/22.41
NOx_ppb$NO2 = NOx_ppb$NO2 * (46/22.41)
#convert NOX from ppb to ugm3
NOx_ppb$NOx = NOx_ppb$NOx * (46/22.41)
#NOx ugm3 files
NOx_files = dir(pattern = "*NOx.*ugm3|ugm3.*NOx")
NOx_list = lapply(NOx_files, read_csv)
NOx = do.call(rbind, NOx_list)
#clean the data of headings
NOx = NOx[- grep("ug/m3", NOx$NOx),]
#bind converted ppb files and ugm3 files
NOx = rbind(NOx, NOx_ppb)
#plot(NOx)
#SO2
#some file preparation required / needed to run this code only once, commented out for testing purposes
# Dublin_ColeraineSt_SO2_2006_ugm3_hr <- read_csv("Dublin_ColeraineSt_SO2_2006_ugm3_hr.csv")
# Dublin_ColeraineSt_SO2_2006_ugm3_hr = as.data.frame(subset(Dublin_ColeraineSt_SO2_2006_ugm3_hr, select=-c(V4,V5,V6)))
# colnames(Dublin_ColeraineSt_SO2_2006_ugm3_hr)[colnames(Dublin_ColeraineSt_SO2_2006_ugm3_hr) == 'SO2(ug/m3)'] <- 'SO2'
# write.csv(Dublin_ColeraineSt_SO2_2006_ugm3_hr, file = "Dublin_ColeraineSt_SO2_2006_ugm3_hr.csv", row.names=FALSE)
#search for ugm3 SO2 files using logical OR statement
SO2_files = dir(pattern = "SO2.*ugm3|ugm3.*SO2")
SO2_list = lapply(SO2_files, read_csv)
#rename columns for binding. Changing columns labelled Hour to newer Time format used by the EPA
SO2_list <- lapply(SO2_list, function(x) setNames(x, gsub("^Hour$", "Time", names(x))) )
#bind the SO2 ugm3 data
SO2 = do.call(rbind, SO2_list)
#search for ppb SO2 files using logical OR statement
SO2_ppb_files = dir(pattern = "SO2.*ppb|ppb.*SO2")
SO2_ppb_list = lapply(SO2_ppb_files, read_csv)
SO2_ppb = do.call(rbind, SO2_ppb_list)
#clear repeated header rows before converting (guarded: an unmatched grep()
#returns integer(0), and a negative integer(0) subscript drops every row)
hdr = grep("ppb", SO2_ppb$SO2)
if (length(hdr) > 0) SO2_ppb = SO2_ppb[-hdr,]
#BUG FIX: the original bound the ppb readings in without converting them,
#mixing ppb and ug/m3 values in one column. Convert exactly as the
#Ballyfermot section does: ug/m3 = ppb x molecular weight (SO2 = 64) / 22.41
SO2_ppb$SO2 = as.numeric(SO2_ppb$SO2)
SO2_ppb$SO2 = SO2_ppb$SO2 * (64/22.41)
#bind SO2 data
SO2 = rbind(SO2, SO2_ppb)
#clean remaining header rows from the ug/m3 files (guarded as above)
hdr = grep("ug/m3", SO2$SO2)
if (length(hdr) > 0) SO2 = SO2[-hdr,]
#merge the CO, NOx and SO2 data files for Coleraine Street (inner joins on
#the shared Date/Time columns; original comment mistakenly said Ballyfermot)
ColeraineSt_CO_NOx_SO2_hr = merge(CO,NOx)
ColeraineSt_CO_NOx_SO2_hr = merge(ColeraineSt_CO_NOx_SO2_hr,SO2)
#combine date and hour into a single POSIXct timestamp and write to directory
ColeraineSt_CO_NOx_SO2_hr$Date <- with(ColeraineSt_CO_NOx_SO2_hr, as.POSIXct(paste(ColeraineSt_CO_NOx_SO2_hr$Date, ColeraineSt_CO_NOx_SO2_hr$Time), format="%Y-%m-%d %H"))
ColeraineSt_CO_NOx_SO2_hr$Time = NULL
write_csv(ColeraineSt_CO_NOx_SO2_hr, "../../Gathered_Data/Dublin_ColeraineSt_CO_NOx_SO2_hr.csv")
#calculate min, max and mean for hourly data
#CO
#combine date and hour columns. atomic vector error workaround: copy, mutate, replace
CO_ave = CO
CO_ave$Date <- with(CO_ave, as.POSIXct(paste(CO_ave$Date, CO_ave$Time), format="%Y-%m-%d %H"))
CO_ave$Time = NULL
CO = CO_ave
CO$CO = as.numeric(CO$CO)
#daily aggregates; `mean`/`min`/`max` shadow the base functions until rm(list=ls())
mean = aggregate(CO[names(CO)!='Date'], list(hour=cut(CO$Date,'day')), mean, na.rm=F)
colnames(mean)[2] = "CO_Mean"
min = aggregate(CO[names(CO)!='Date'], list(hour=cut(CO$Date,'day')), min, na.rm=F)
colnames(min)[2] = "CO_Min"
max = aggregate(CO[names(CO)!='Date'], list(hour=cut(CO$Date,'day')), max, na.rm=F)
colnames(max)[2] = "CO_Max"
ColeraineStreet_CO_NOx_SO2_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(mean,min,max))
#NOx
#combine date and hour columns. atomic vector error workaround
NOx_ave = NOx
NOx_ave$Date <- with(NOx_ave, as.POSIXct(paste(NOx_ave$Date, NOx_ave$Time), format="%Y-%m-%d %H"))
NOx_ave$Time = NULL
NOx = NOx_ave
#change from string to numeric values
NOx$NOx = as.numeric(NOx$NOx)
NOx$NO = as.numeric(NOx$NO)
NOx$NO2 = as.numeric(NOx$NO2)
#gsub order matters below: "NOx" first, then exact-suffix "NO$", then "NO2"
mean = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), mean, na.rm=F)
names(mean) <- gsub("NOx", "NOx_Mean", names(mean))
names(mean) <- gsub("NO$", "NO_Mean", names(mean))
names(mean) <- gsub("NO2", "NO2_Mean", names(mean))
min = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), min, na.rm=F)
names(min) <- gsub("NOx", "NOx_Min", names(min))
names(min) <- gsub("NO$", "NO_Min", names(min))
names(min) <- gsub("NO2", "NO2_Min", names(min))
max = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), max, na.rm=F)
names(max) <- gsub("NOx", "NOx_Max", names(max))
names(max) <- gsub("NO$", "NO_Max", names(max))
names(max) <- gsub("NO2", "NO2_Max", names(max))
#merge the data with the CO data
ColeraineStreet_CO_NOx_SO2_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(ColeraineStreet_CO_NOx_SO2_hr_MMM_daily,mean,min,max))
#SO2
#combine date and hour columns. atomic vector error workaround
SO2_ave = SO2
SO2_ave$Date <- with(SO2_ave, as.POSIXct(paste(SO2_ave$Date, SO2_ave$Time), format="%Y-%m-%d %H"))
SO2_ave$Time = NULL
SO2 = SO2_ave
SO2$SO2 = as.numeric(SO2$SO2)
mean = aggregate(SO2[names(SO2)!='Date'], list(hour=cut(SO2$Date,'day')), mean, na.rm=F)
colnames(mean)[2] = "SO2_Mean"
min = aggregate(SO2[names(SO2)!='Date'], list(hour=cut(SO2$Date,'day')), min, na.rm=F)
colnames(min)[2] = "SO2_Min"
max = aggregate(SO2[names(SO2)!='Date'], list(hour=cut(SO2$Date,'day')), max, na.rm=F)
colnames(max)[2] = "SO2_Max"
#merge the SO2 summaries with the existing CO/NOx summaries
ColeraineStreet_CO_NOx_SO2_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(ColeraineStreet_CO_NOx_SO2_hr_MMM_daily,mean,min,max))
#change column name to date
colnames(ColeraineStreet_CO_NOx_SO2_hr_MMM_daily)[1] = "Date"
#remove hours from data
ColeraineStreet_CO_NOx_SO2_hr_MMM_daily$Date = as.Date(ColeraineStreet_CO_NOx_SO2_hr_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted hourly data
write_csv(ColeraineStreet_CO_NOx_SO2_hr_MMM_daily, "../../Gathered_Data/Dublin_ColeraineSt_CO_NOx_SO2_hr_MMM_daily.csv")
#plot(ColeraineSt_CO_NOx_SO2_hr)
#clean the environment
rm(list=ls())
#COLERAINE STREET DAILY--------------------------------------------------------------------------------
# Daily Pb, PM2.5 and PM10 for Coleraine Street: stack each pollutant's CSVs,
# strip header rows, align Date types, merge, and save.
#Lead - Pb
setwd('../daily')
#convert xls files to csv files and delete the xls files
#commented out as it has already been completed
# xls <- dir(pattern = "(.*)xls$")
# created <- mapply(convert, xls, gsub("xls", "csv", xls))
# unlink(xls)
#
# #convert xlsx files to csv files and delete the xlsx files
# xlsx <- dir(pattern = "(.*)xlsx$")
# created <- mapply(convert, xlsx, gsub("xlsx", "csv", xlsx))
# unlink(xlsx)
#
# #convert txt files to csv files and delete the txt files
# txt <- dir(pattern = "(.*)txt$")
# created <- mapply(convert, txt, gsub("txt", "csv", txt))
# unlink(txt)
#Pb
#bind the Pb files
Pb_files = dir(pattern = "*Pb")
Pb_list = lapply(Pb_files, read_csv)
Pb = do.call(rbind, Pb_list)
#clean the data of headers (unguarded grep — assumes a "ug/m3" row exists)
Pb = Pb[- grep("ug/m3", Pb$Pb),]
#bind the PM2.5 files
PM25_files = dir(pattern = "*PM25")
PM25_list = lapply(PM25_files, read_csv)
PM25 = do.call(rbind, PM25_list)
#clean the data of headers
PM25 = PM25[- grep("ug/m3", PM25$PM2.5),]
#commented out for testing code, only needed to run this code once
#PM10
#some files need tidying up ie reorganising and header name changes
# Dublin_ColeraineSt_PM10_2004_ugm3 <- read_csv("Dublin_ColeraineSt_PM10_2004_ugm3.csv")
# Dublin_ColeraineSt_PM10_2004_ugm3$`PM10(ug/m3)` = NULL
# names(Dublin_ColeraineSt_PM10_2004_ugm3)[names(Dublin_ColeraineSt_PM10_2004_ugm3) == "Date"] = "PM10"
# names(Dublin_ColeraineSt_PM10_2004_ugm3)[names(Dublin_ColeraineSt_PM10_2004_ugm3) == "V1"] = "Date"
#
# #change the date format for binding
# Dublin_ColeraineSt_PM10_2004_ugm3$Date = parse_date_time(Dublin_ColeraineSt_PM10_2004_ugm3$Date, c('dmy', 'ymd'))
#
# write.csv(Dublin_ColeraineSt_PM10_2004_ugm3, file = "Dublin_ColeraineSt_PM10_2004_ugm3.csv", row.names=FALSE)
#
# #PM10_2005 file
# Dublin_ColeraineSt_PM10_2005_ugm3 <- read_csv("Dublin_ColeraineSt_PM10_2005_ugm3.csv")
# Dublin_ColeraineSt_PM10_2005_ugm3$`PM10(ug/m3)` = NULL
# names(Dublin_ColeraineSt_PM10_2005_ugm3)[names(Dublin_ColeraineSt_PM10_2005_ugm3) == "Date"] = "PM10"
# names(Dublin_ColeraineSt_PM10_2005_ugm3)[names(Dublin_ColeraineSt_PM10_2005_ugm3) == "V1"] = "Date"
# Dublin_ColeraineSt_PM10_2005_ugm3$Date = parse_date_time(Dublin_ColeraineSt_PM10_2005_ugm3$Date, c('dmy', 'ymd'))
# write.csv(Dublin_ColeraineSt_PM10_2005_ugm3, file = "Dublin_ColeraineSt_PM10_2005_ugm3.csv", row.names=FALSE)
#
# #PM10_2006 file
# Dublin_ColeraineSt_PM10_2006_ugm3 <- read_csv("Dublin_ColeraineSt_PM10_2006_ugm3.csv")
# Dublin_ColeraineSt_PM10_2006_ugm3 <- Dublin_ColeraineSt_PM10_2006_ugm3[, -c(3:4)]
# colnames(Dublin_ColeraineSt_PM10_2006_ugm3)[2] = "PM10"
# Dublin_ColeraineSt_PM10_2006_ugm3$Date = parse_date_time(Dublin_ColeraineSt_PM10_2006_ugm3$Date, c('dmy', 'ymd'))
# write.csv(Dublin_ColeraineSt_PM10_2006_ugm3, file = "Dublin_ColeraineSt_PM10_2006_ugm3.csv", row.names=FALSE)
#bind the PM10 files
PM10_files = dir(pattern = "*PM10")
PM10_list = lapply(PM10_files, read_csv)
PM10 = do.call(rbind, PM10_list)
#clean the data of headers (both unit spellings appear across the years)
PM10 = PM10[- grep("ug/m3", PM10$PM10),]
PM10 = PM10[- grep("ugm3", PM10$PM10),]
#set all date columns to Date class so R can merge the files
PM10$Date = as.Date(PM10$Date)
PM25$Date = as.Date(PM25$Date)
Pb$Date = as.Date(Pb$Date)
#merge daily data
# NOTE(review): the first merge is an inner join (only dates with both PM10
# and Pb survive) while the second is an outer join (all = TRUE) — verify
# this asymmetry is intended.
Dublin_ColeraineSt_Pb_PM10_PM25_daily = merge(PM10, Pb)
Dublin_ColeraineSt_Pb_PM10_PM25_daily = merge(Dublin_ColeraineSt_Pb_PM10_PM25_daily, PM25, by = "Date", all = TRUE)
#save the cleaned dataframe
write.csv(Dublin_ColeraineSt_Pb_PM10_PM25_daily, file = "../../Gathered_Data/Dublin_ColeraineSt_Pb_PM10_PM25_daily.csv", row.names=FALSE)
#clean the environment
rm(list=ls())
#COLLEGE GREEN DAILY--------------------------------------------------------------------------------
# Daily PM10 for College Green: txt files (whitespace-separated) plus xls
# files, each with its own date format, bound into one frame and saved.
setwd('..')
setwd('..')
setwd("CollegeGreen/")
#read in PM10 files
PM10_files = dir(pattern = "PM10.*txt")
PM10_list = lapply(PM10_files, read_table2)
PM10 = do.call(rbind, PM10_list)
#change column name for binding
colnames(PM10)[2] = "PM10"
#make sure R recognises the Date column from the txt files (dd/mm/yyyy) for proper binding
PM10$Date = as.Date(PM10$Date, format="%d/%m/%Y")
#import xls files
PM10_files = dir(pattern = "PM10.*xls")
PM10_list = lapply(PM10_files, read_xls)
PM10_xls = do.call(rbind, PM10_list)
#make sure R recognises the Date column from the xls files (yyyy/mm/dd) for proper binding
PM10_xls$Date = as.Date(PM10_xls$Date, format="%Y/%m/%d")
#bind the text files and the xls file together. clean the data of headers
#(unguarded grep — assumes at least one "ug/m3" row exists)
PM10 = rbind(PM10, PM10_xls)
PM10 = PM10[- grep("ug/m3", PM10$PM10),]
#save the data
write.csv(PM10, file = "../Gathered_Data/Dublin_CollegeGreen_PM10_daily.csv", row.names=FALSE)
#clean the environment
rm(list=ls())
#COUNTY COUNCIL --------------------------------------------------------------------------------
# Historical County Council smoke and SO2 bubbler measurements: same recipe
# as the City Council section — rbind.fill the ragged xls sheets, fix Excel
# serial dates, coalesce renamed street columns, suffix the column names.
setwd('..')
setwd("CountyCouncil/")
#Smoke
#bind the smoke files
Files = dir(pattern = "Smoke")
List = lapply(Files, read_xls)
smoke = do.call(rbind.fill, List)
#change Location column to Date
colnames(smoke)[1] = "Date"
#need to convert numeric excel date to standard date format
library(janitor)
smoke$Date = excel_numeric_to_date(as.numeric(as.character(smoke$Date)), date_system = "modern")
#sort columns alphabetically to make sure there are no duplicate street names
#(inspection copy only — `smoke` itself is left in its original column order)
test = smoke[ , order(names(smoke))]
#merge columns that are duplicated due to header name changes
smoke$Avonbeg <- ifelse(is.na(smoke$ABEG), smoke$AVONBEG, smoke$ABEG)
smoke$ABEG = NULL
smoke$AVONBEG= NULL
smoke$Balbriggan <- ifelse(is.na(smoke$BALBRIGGAN), smoke$BBGAN, smoke$BALBRIGGAN)
smoke$BALBRIGGAN = NULL
smoke$BBGAN = NULL
# NOTE(review): `smoke$BROOKFIELD ` below carries a trailing space inside
# is.na() and resolves via `$` partial matching — fragile but functional.
smoke$Brookfield <- ifelse(is.na(smoke$BROOKFIELD ), smoke$BROOK , smoke$BROOKFIELD)
smoke$BROOKFIELD = NULL
smoke$BROOK = NULL
smoke$MountAnville <- ifelse(is.na(smoke$`MOUNT ANVIL` ), smoke$`MOUNT ANVILLE` , smoke$`MOUNT ANVIL`)
smoke$`MOUNT ANVIL` = NULL
smoke$`MOUNT ANVILLE` = NULL
smoke$QuarryVale <- ifelse(is.na(smoke$QVALE ), smoke$QUARRYVALE , smoke$QVALE)
smoke$QVALE = NULL
smoke$QUARRYVALE = NULL
#third spelling variant of the same street
smoke$QuarryVale = ifelse(is.na(smoke$QuarryVale), smoke$QUARYVALE, smoke$QuarryVale)
smoke$QUARYVALE = NULL
#count NAs per column (map() is purrr; inspection only)
map(smoke, ~sum(is.na(.)))
#clean the data of heading rows (unguarded grep — assumes a "ugm3" row exists)
smoke = smoke[- grep("ugm3", smoke$DUNLAOIRE),]
#add smoke to all road names
colnames(smoke) <- paste(colnames(smoke), "smoke", sep = "_")
colnames(smoke)[1] = "Date"
#plot(smoke, type = "l")
#SO2
#bind the SO2 bubbler files (original comment said "Pb files")
Files = dir(pattern = "SO2Bubbler")
List = lapply(Files, read_xls)
SO2 = do.call(rbind.fill, List)
#change Location column to Date
colnames(SO2)[1] = "Date"
#need to convert numeric excel date to standard date format
library(janitor)
SO2$Date = excel_numeric_to_date(as.numeric(as.character(SO2$Date)), date_system = "modern")
#sort columns alphabetically to make sure there are no duplicate street names
#test = SO2[ , order(names(SO2))]
#merge columns that are duplicated due to header name changes
SO2$MountAnville <- ifelse(is.na(SO2$`MOUNT ANVIL` ), SO2$`MOUNT ANVILLE` , SO2$`MOUNT ANVIL`)
SO2$`MOUNT ANVIL` = NULL
SO2$`MOUNT ANVILLE` = NULL
#add SO2 to all road names
colnames(SO2) <- paste(colnames(SO2), "SO2", sep = "_")
colnames(SO2)[1] = "Date"
#clean the data of heading rows (note: addressed by the full post-rename name here)
SO2 = SO2[- grep("ugm3", SO2$DUNLAOIRE_SO2),]
#merge datasets (inner join on Date)
Dublin_CountyCouncil_Old_Smoke_SO2_daily = merge(smoke, SO2)
#save the data (filename carries a doubled "_daily" suffix — kept as-is so
#downstream readers of the existing file keep working)
write.csv(Dublin_CountyCouncil_Old_Smoke_SO2_daily, file = "../Gathered_Data/Dublin_CountyCouncil_Old_Smoke_SO2_daily_daily.csv")
#CRUMLIN HOURLY--------------------------------------------------------------------------------
#Builds the hourly Benzene + NOx (ug/m3) dataset for the Crumlin station, saves
#it, then derives and saves a daily mean/min/max summary of the same data.
setwd('..')
setwd("Crumlin/")
#import benzene files
benzene_files = dir(pattern = "*Benzene")
benzene_list = lapply(benzene_files, read_xls)
Benzene = do.call(rbind, benzene_list)
Benzene_hr = Benzene
#combine date and hour columns into a single POSIXct timestamp
Benzene_hr$Date <- with(Benzene_hr, as.POSIXct(paste(Benzene_hr$Date, Benzene_hr$Hour), format="%Y-%m-%d %H"))
Benzene_hr$Hour = NULL
#search for ppb NOX files using logical OR statement
NOx_ppb_files = dir(pattern = "NOx.*ppb|ppb.*NOx")
#NOx ppb files conversion
NOx_ppb_list = lapply(NOx_ppb_files, read_xls)
NOx_ppb = do.call(rbind, NOx_ppb_list)
#drop repeated header rows ("ppb" written into the NO2 column). Guard the empty
#match: x[-integer(0), ] selects ZERO rows, so an unguarded -grep would wipe
#the whole table when no header rows are present
hdr_rows = grep("ppb", NOx_ppb$NO2)
if (length(hdr_rows) > 0) NOx_ppb = NOx_ppb[-hdr_rows, ]
#convert all the NO columns from strings to numerical values for calculations
NOx_ppb[, 3:5] <- sapply(NOx_ppb[, 3:5], as.numeric)
#ppb -> ug/m3 conversion: ppb x molecular weight / 22.41 (molar volume)
NOx_ppb$NO = NOx_ppb$NO * (30/22.41)    #NO, MW 30
NOx_ppb$NO2 = NOx_ppb$NO2 * (46/22.41)  #NO2, MW 46
NOx_ppb$NOx = NOx_ppb$NOx * (46/22.41)  #NOx reported as NO2 equivalent
#NOx files already in ug/m3
NOx_files = dir(pattern = "NOx.*ugm3")
NOx_list = lapply(NOx_files, read_xls)
NOx = do.call(rbind, NOx_list)
#drop repeated header rows ("ug/m3" written into the NO2 column)
hdr_rows = grep("ug/m3", NOx$NO2)
if (length(hdr_rows) > 0) NOx = NOx[-hdr_rows, ]
#change NO2 to numeric format for binding
NOx$NO2 = as.numeric(NOx$NO2)
#row-bind data with missing columns. NOTE: bind_rows() comes from dplyr, not
#data.table; the attach below is kept only in case later code relies on it
library(data.table)
NOx = bind_rows(NOx, NOx_ppb)
#avoid atomic vector errors
NOx_hr = NOx
#combine date and hour columns into a single POSIXct timestamp
NOx_hr$Date <- with(NOx_hr, as.POSIXct(paste(NOx_hr$Date, NOx_hr$Hour), format="%Y-%m-%d %H"))
NOx_hr$Hour = NULL
#merge the datasets on their shared Date column
Crumlin_Benzene_NOx_ugm3_hr = Reduce(function(x, y) merge(x, y, all=TRUE), list(NOx_hr, Benzene_hr))
#drop leftover benzene header rows (guarded, as above)
hdr_rows = grep("ug/m3", Crumlin_Benzene_NOx_ugm3_hr$Benzene)
if (length(hdr_rows) > 0) Crumlin_Benzene_NOx_ugm3_hr = Crumlin_Benzene_NOx_ugm3_hr[-hdr_rows, ]
#save the data
write_csv(Crumlin_Benzene_NOx_ugm3_hr, "../Gathered_Data/Dublin_Crumlin_Benzene_NOx_ugm3_hr.csv")
#change from string to numeric values before aggregating
Crumlin_Benzene_NOx_ugm3_hr$NOx = as.numeric(Crumlin_Benzene_NOx_ugm3_hr$NOx)
Crumlin_Benzene_NOx_ugm3_hr$NO = as.numeric(Crumlin_Benzene_NOx_ugm3_hr$NO)
Crumlin_Benzene_NOx_ugm3_hr$NO2 = as.numeric(Crumlin_Benzene_NOx_ugm3_hr$NO2)
Crumlin_Benzene_NOx_ugm3_hr$Benzene = as.numeric(Crumlin_Benzene_NOx_ugm3_hr$Benzene)
#daily mean/min/max summaries. Results are stored under daily_* names so they do
#not shadow base::mean/min/max; na.rm is spelled out as FALSE (T/F are
#reassignable) — a day with any missing hour therefore yields NA, as before
daily_mean = aggregate(Crumlin_Benzene_NOx_ugm3_hr[names(Crumlin_Benzene_NOx_ugm3_hr)!='Date'], list(hour=cut(Crumlin_Benzene_NOx_ugm3_hr$Date,'day')), mean, na.rm=FALSE)
names(daily_mean) <- gsub("NOx", "NOx_Mean", names(daily_mean))
names(daily_mean) <- gsub("NO$", "NO_Mean", names(daily_mean))
names(daily_mean) <- gsub("NO2", "NO2_Mean", names(daily_mean))
names(daily_mean) <- gsub("Benzene", "Benzene_Mean", names(daily_mean))
daily_min = aggregate(Crumlin_Benzene_NOx_ugm3_hr[names(Crumlin_Benzene_NOx_ugm3_hr)!='Date'], list(hour=cut(Crumlin_Benzene_NOx_ugm3_hr$Date,'day')), min, na.rm=FALSE)
names(daily_min) <- gsub("NOx", "NOx_Min", names(daily_min))
names(daily_min) <- gsub("NO$", "NO_Min", names(daily_min))
names(daily_min) <- gsub("NO2", "NO2_Min", names(daily_min))
names(daily_min) <- gsub("Benzene", "Benzene_Min", names(daily_min))
daily_max = aggregate(Crumlin_Benzene_NOx_ugm3_hr[names(Crumlin_Benzene_NOx_ugm3_hr)!='Date'], list(hour=cut(Crumlin_Benzene_NOx_ugm3_hr$Date,'day')), max, na.rm=FALSE)
names(daily_max) <- gsub("NOx", "NOx_Max", names(daily_max))
names(daily_max) <- gsub("NO$", "NO_Max", names(daily_max))
names(daily_max) <- gsub("NO2", "NO2_Max", names(daily_max))
names(daily_max) <- gsub("Benzene", "Benzene_Max", names(daily_max))
#merge the three daily summaries
Crumlin_Benzene_NOx_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(daily_mean,daily_min,daily_max))
#change column name to date
colnames(Crumlin_Benzene_NOx_hr_MMM_daily)[1] = "Date"
#remove hours from data
Crumlin_Benzene_NOx_hr_MMM_daily$Date = as.Date(Crumlin_Benzene_NOx_hr_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted hourly data
write_csv(Crumlin_Benzene_NOx_hr_MMM_daily, "../Gathered_Data/Dublin_Crumlin_Benzene_NOx_hr_MMM_daily.csv")
#clean the environment
rm(list=ls())
#DAVITT ROAD DAILY --------------------------------------------------------------------------------
setwd('..')
setwd("DavittRd/")
#read every PM10 workbook in the folder and stack them into one table
PM10 = do.call(rbind, lapply(dir(pattern = "*PM10"), read_xlsx))
#drop the repeated header rows (they carry the unit label in the PM10 column)
PM10 = PM10[- grep("ug/m3", PM10$PM10),]
#the files mix day-first and year-first date strings; normalise both forms
PM10$Date = parse_date_time(PM10$Date, c('dmy', 'ymd'))
#save the data
write_csv(PM10,"../Gathered_Data/Dublin_DavittRd_PM10_ugm3_daily.csv")
#clean the environment
rm(list=ls())
#DUNLAOIGHAIRE HOURLY--------------------------------------------------------------------------------
#Builds the hourly NOx (ug/m3) dataset for Dun Laoighaire, saves it, then
#derives and saves a daily mean/min/max summary.
setwd('..')
setwd("DunLaoighaire/")
# #convert xls files to csv files and delete the xls files
#already run so commenting out
# xls <- dir(pattern = "(.*)xls$")
# created <- mapply(convert, xls, gsub("xls", "csv", xls))
# unlink(xls)
#
# #convert xlsx files to csv files and delete the xlsx files
# xlsx <- dir(pattern = "(.*)xlsx$")
# created <- mapply(convert, xlsx, gsub("xlsx", "csv", xlsx))
# unlink(xlsx)
#NOx hourly data
#search for ppb NOX files using logical OR statement for conversion
NOx_ppb_files = dir(pattern = "NOx.*ppb|ppb.*NOx")
NOx_ppb_list = lapply(NOx_ppb_files, read_csv)
NOx_ppb = do.call(rbind, NOx_ppb_list)
#drop repeated header rows ("ppb" in the NO2 column). Guard the empty match:
#x[-integer(0), ] selects ZERO rows, so an unguarded -grep would wipe the table
hdr_rows = grep("ppb", NOx_ppb$NO2)
if (length(hdr_rows) > 0) NOx_ppb = NOx_ppb[-hdr_rows, ]
#convert all the NO columns from strings to numerical values for calculations
NOx_ppb[, 3:5] <- sapply(NOx_ppb[, 3:5], as.numeric)
#ppb -> ug/m3 conversion: ppb x molecular weight / 22.41 (molar volume)
NOx_ppb$NO = NOx_ppb$NO * (30/22.41)    #NO, MW 30
NOx_ppb$NO2 = NOx_ppb$NO2 * (46/22.41)  #NO2, MW 46
NOx_ppb$NOx = NOx_ppb$NOx * (46/22.41)  #NOx reported as NO2 equivalent
#search for ugm3 NOX files using logical OR statement
NOx_files = dir(pattern = "NOx.*ugm3|ugm3.*NOx")
NOx_list = lapply(NOx_files, read_csv)
NOx = do.call(rbind, NOx_list)
#drop repeated header rows ("ug/m3" in the NO2 column)
hdr_rows = grep("ug/m3", NOx$NO2)
if (length(hdr_rows) > 0) NOx = NOx[-hdr_rows, ]
#combine both type of files into one
NOx_final = rbind(NOx,NOx_ppb)
#Combine the date and the time into a single POSIXct timestamp.
#BUG FIX: the original evaluated this inside with(NOx, ...) — the wrong data
#frame; it only worked because every reference was fully qualified
NOx_final$Date <- with(NOx_final, as.POSIXct(paste(NOx_final$Date, NOx_final$Time), format="%Y-%m-%d %H"))
NOx_final$Time = NULL
#save the data
write_csv(NOx_final,"../Gathered_Data/Dublin_DunLaoighaire_NOx_ugm3_hr.csv")
#calculate min/max/mean for NOx
#change from string to numeric values
NOx = NOx_final
NOx$NOx = as.numeric(NOx$NOx)
NOx$NO = as.numeric(NOx$NO)
NOx$NO2 = as.numeric(NOx$NO2)
#daily summaries stored under daily_* names so they do not shadow
#base::mean/min/max; na.rm spelled out as FALSE (behaviour unchanged)
daily_mean = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), mean, na.rm=FALSE)
names(daily_mean) <- gsub("NOx", "NOx_Mean", names(daily_mean))
names(daily_mean) <- gsub("NO$", "NO_Mean", names(daily_mean))
names(daily_mean) <- gsub("NO2", "NO2_Mean", names(daily_mean))
daily_min = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), min, na.rm=FALSE)
names(daily_min) <- gsub("NOx", "NOx_Min", names(daily_min))
names(daily_min) <- gsub("NO$", "NO_Min", names(daily_min))
names(daily_min) <- gsub("NO2", "NO2_Min", names(daily_min))
daily_max = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), max, na.rm=FALSE)
names(daily_max) <- gsub("NOx", "NOx_Max", names(daily_max))
names(daily_max) <- gsub("NO$", "NO_Max", names(daily_max))
names(daily_max) <- gsub("NO2", "NO2_Max", names(daily_max))
#merge the three daily summaries
Dunlaoighaire_NOx_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(daily_mean,daily_min,daily_max))
#change column name to date
colnames(Dunlaoighaire_NOx_hr_MMM_daily)[1] = "Date"
#remove hours from data
Dunlaoighaire_NOx_hr_MMM_daily$Date = as.Date(Dunlaoighaire_NOx_hr_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted hourly data
write_csv(Dunlaoighaire_NOx_hr_MMM_daily, "../Gathered_Data/Dublin_DunLaoighaire_NOx_hr_MMM_daily.csv")
#clean the environment
rm(list=ls())
#DUNLAOIGHAIRE DAILY--------------------------------------------------------------------------------
#PM10 daily data
#stack every PM10 csv export in the current folder into one table
PM10 = do.call(rbind, lapply(dir(pattern = "*PM10"), read_csv))
#drop the repeated header rows (unit label written into the PM10 column)
PM10 = PM10[- grep("ug/m3", PM10$PM10),]
#truncate the timestamps to calendar dates
PM10$Date = as.Date(PM10$Date,format='%Y-%m-%d %H')
#save the data
write_csv(PM10, "../Gathered_Data/Dublin_DunLaoighaire_PM10_ugm3_daily.csv")
#clean the environment
rm(list=ls())
#FINGLAS DAILY --------------------------------------------------------------------------------
setwd('..')
setwd("Finglas/")
#stack every PM25 workbook in the folder into one table
PM25 = do.call(rbind, lapply(dir(pattern = "PM25"), read_xlsx))
#drop the repeated header rows (unit label written into the PM2.5 column)
PM25 = PM25[- grep("ug/m3", PM25$PM2.5),]
#save the data
write_csv(PM25, "../Gathered_Data/Dublin_Finglas_PM25_ugm3_daily.csv")
#clean the environment
rm(list=ls())
#KILBARRACK --------------------------------------------------------------------------------
setwd('..')
setwd("Kilbarrack/")
#stack every lead (Pb) workbook in the folder into one table
Pb = do.call(rbind, lapply(dir(pattern = "Pb"), read_xls))
#drop the repeated header rows (unit label written into the Pb column)
Pb = Pb[- grep("ug/m3", Pb$Pb),]
#save the data
write_csv(Pb, "../Gathered_Data/Dublin_Kilbarrack_Pb_ugm3_daily.csv")
#clean the environment
rm(list=ls())
#KNOCKLYON DAILY--------------------------------------------------------------------------------
setwd('..')
setwd("Knocklyon/")
#daily data: one workbook per pollutant; each repeats a unit-label header row
#helper: import one pollutant workbook and drop its repeated unit-header rows
strip_unit_rows <- function(file, col, unit) {
  dat <- read_excel(file)
  dat[- grep(unit, dat[[col]]), ]
}
#Arsenic (ng/m3)
As <- strip_unit_rows("Dublin_Knocklyon_As_2008_ngm3_day.xls", "As", "ng/m3")
#Cadmium (ng/m3)
Cd <- strip_unit_rows("Dublin_Knocklyon_Cd_2008_ngm3_day.xls", "Cd", "ng/m3")
#Nickel (ng/m3)
Ni <- strip_unit_rows("Dublin_Knocklyon_Ni_2008_ngm3_day.xls", "Ni", "ng/m3")
#Lead (ug/m3)
Pb <- strip_unit_rows("Dublin_Knocklyon_Pb_2008_ugm3_day.xls", "Pb", "ug/m3")
#PM10 (ug/m3)
PM10 <- strip_unit_rows("Dublin_Knocklyon_PM10_2008_ugm3_day.xls", "PM10", "ug/m3")
#merge the five daily series on their shared Date column
Dublin_Knocklyon_As_Cd_Ni_Pb_PM10_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(As,Cd,Ni,Pb,PM10))
#save the data
write_csv(Dublin_Knocklyon_As_Cd_Ni_Pb_PM10_daily, "../Gathered_Data/Dublin_Knocklyon_As_Cd_Ni_Pb_PM10_daily.csv")
#KNOCKLYON HOURLY--------------------------------------------------------------------------------
#Builds the hourly CO + NOx + SO2 dataset for Knocklyon, saves it, then derives
#and saves a daily mean/min/max summary.
#CO
#search for and bind the CO files already in mg/m3
CO_files = dir(pattern = "CO.*mgm3|mgm3.*CO")
CO_list = lapply(CO_files, read_xls)
CO = do.call(rbind, CO_list)
#drop repeated header rows ("mg/m3" in the CO column). Guard the empty match:
#x[-integer(0), ] selects ZERO rows, so an unguarded -grep would wipe the table
hdr_rows = grep("mg/m3", CO$CO)
if (length(hdr_rows) > 0) CO = CO[-hdr_rows, ]
#import the ppm files and convert to mg/m3
CO_ppm_files = dir(pattern = "CO.*ppm|ppm.*CO")
CO_ppm_list = lapply(CO_ppm_files, read_xls)
CO_ppm = do.call(rbind, CO_ppm_list)
#drop repeated header rows ("ppm" in the CO column)
hdr_rows = grep("ppm", CO_ppm$CO)
if (length(hdr_rows) > 0) CO_ppm = CO_ppm[-hdr_rows, ]
#convert ppm to mg/m3: ppm x molecular weight (CO = 28) / 22.41 (molar volume)
CO_ppm$CO = as.numeric(CO_ppm$CO)
CO_ppm$CO = CO_ppm$CO * (28/22.41)
#bind the 2 CO files
CO = rbind(CO,CO_ppm)
#NOx data, import and convert from ppb to ugm3
#search for ppb NOX files using logical OR statement for conversion
NOx_ppb_files = dir(pattern = "NOx.*ppb|ppb.*NOx")
NOx_ppb_list = lapply(NOx_ppb_files, read_xls)
NOx_ppb = do.call(rbind, NOx_ppb_list)
#drop repeated header rows ("ppb" in the NO2 column)
hdr_rows = grep("ppb", NOx_ppb$NO2)
if (length(hdr_rows) > 0) NOx_ppb = NOx_ppb[-hdr_rows, ]
#convert all the NO columns from strings to numerical values for calculations
NOx_ppb[, 3:5] <- sapply(NOx_ppb[, 3:5], as.numeric)
#ppb -> ug/m3: ppb x molecular weight / 22.41 (NO = 30, NO2 = 46, NOx as NO2)
NOx_ppb$NO = NOx_ppb$NO * (30/22.41)
NOx_ppb$NO2 = NOx_ppb$NO2 * (46/22.41)
NOx_ppb$NOx = NOx_ppb$NOx * (46/22.41)
NOx = NOx_ppb
#SO2
#search for ugm3 SO2 files using logical OR statement
SO2_files = dir(pattern = "SO2.*ugm3|ugm3.*SO2")
SO2_list = lapply(SO2_files, read_xls)
SO2 = do.call(rbind, SO2_list)
#search for ppb SO2 files using logical OR statement
SO2_ppb_files = dir(pattern = "SO2.*ppb|ppb.*SO2")
SO2_ppb_list = lapply(SO2_ppb_files, read_xls)
SO2_ppb = do.call(rbind, SO2_ppb_list)
#convert SO2 ppb data to ugm3 for consistency (SO2 MW = 64).
#NOTE(review): unlike CO/NOx, SO2 header rows are never grep-filtered here;
#they are coerced to NA by as.numeric instead — confirm this is acceptable
SO2_ppb$SO2 = as.numeric(SO2_ppb$SO2)
SO2_ppb$SO2 = SO2_ppb$SO2 * (64/22.41)
#bind SO2 data
SO2 = rbind(SO2, SO2_ppb)
#combine CO, NOx, SO2 on their shared Date/Time columns
Dublin_Knocklyon_CO_NOx_SO2_hr = Reduce(function(x, y) merge(x, y, all=TRUE), list(CO, NOx, SO2))
#bind the time and the date columns into a single POSIXct timestamp
Dublin_Knocklyon_CO_NOx_SO2_hr$Date <- with(Dublin_Knocklyon_CO_NOx_SO2_hr, as.POSIXct(paste(Dublin_Knocklyon_CO_NOx_SO2_hr$Date, Dublin_Knocklyon_CO_NOx_SO2_hr$Time), format="%Y-%m-%d %H"))
Dublin_Knocklyon_CO_NOx_SO2_hr$Time = NULL
#save the data
write_csv(Dublin_Knocklyon_CO_NOx_SO2_hr, "../Gathered_Data/Dublin_Knocklyon_CO_NOx_SO2_hr.csv")
#calculate min/max/mean for the data
#change from string to numeric values
Dublin_Knocklyon_CO_NOx_SO2_hr$NOx = as.numeric(Dublin_Knocklyon_CO_NOx_SO2_hr$NOx)
Dublin_Knocklyon_CO_NOx_SO2_hr$NO = as.numeric(Dublin_Knocklyon_CO_NOx_SO2_hr$NO)
Dublin_Knocklyon_CO_NOx_SO2_hr$NO2 = as.numeric(Dublin_Knocklyon_CO_NOx_SO2_hr$NO2)
Dublin_Knocklyon_CO_NOx_SO2_hr$CO = as.numeric(Dublin_Knocklyon_CO_NOx_SO2_hr$CO)
Dublin_Knocklyon_CO_NOx_SO2_hr$SO2 = as.numeric(Dublin_Knocklyon_CO_NOx_SO2_hr$SO2)
#daily summaries stored under daily_* names so they do not shadow
#base::mean/min/max; na.rm spelled out as FALSE (behaviour unchanged)
daily_mean = aggregate(Dublin_Knocklyon_CO_NOx_SO2_hr[names(Dublin_Knocklyon_CO_NOx_SO2_hr)!='Date'], list(hour=cut(Dublin_Knocklyon_CO_NOx_SO2_hr$Date,'day')), mean, na.rm=FALSE)
names(daily_mean) <- gsub("NOx", "NOx_Mean", names(daily_mean))
names(daily_mean) <- gsub("NO$", "NO_Mean", names(daily_mean))
names(daily_mean) <- gsub("NO2", "NO2_Mean", names(daily_mean))
names(daily_mean) <- gsub("CO", "CO_Mean", names(daily_mean))
names(daily_mean) <- gsub("SO2", "SO2_Mean", names(daily_mean))
daily_min = aggregate(Dublin_Knocklyon_CO_NOx_SO2_hr[names(Dublin_Knocklyon_CO_NOx_SO2_hr)!='Date'], list(hour=cut(Dublin_Knocklyon_CO_NOx_SO2_hr$Date,'day')), min, na.rm=FALSE)
names(daily_min) <- gsub("NOx", "NOx_Min", names(daily_min))
names(daily_min) <- gsub("NO$", "NO_Min", names(daily_min))
names(daily_min) <- gsub("NO2", "NO2_Min", names(daily_min))
names(daily_min) <- gsub("CO", "CO_Min", names(daily_min))
names(daily_min) <- gsub("SO2", "SO2_Min", names(daily_min))
daily_max = aggregate(Dublin_Knocklyon_CO_NOx_SO2_hr[names(Dublin_Knocklyon_CO_NOx_SO2_hr)!='Date'], list(hour=cut(Dublin_Knocklyon_CO_NOx_SO2_hr$Date,'day')), max, na.rm=FALSE)
names(daily_max) <- gsub("NOx", "NOx_Max", names(daily_max))
names(daily_max) <- gsub("NO$", "NO_Max", names(daily_max))
names(daily_max) <- gsub("NO2", "NO2_Max", names(daily_max))
names(daily_max) <- gsub("CO", "CO_Max", names(daily_max))
names(daily_max) <- gsub("SO2", "SO2_Max", names(daily_max))
#merge the three daily summaries
Dublin_Knocklyon_CO_NOx_SO2_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(daily_mean,daily_min,daily_max))
#change column name to date
colnames(Dublin_Knocklyon_CO_NOx_SO2_hr_MMM_daily)[1] = "Date"
#remove hours from data
Dublin_Knocklyon_CO_NOx_SO2_hr_MMM_daily$Date = as.Date(Dublin_Knocklyon_CO_NOx_SO2_hr_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted hourly data
write_csv(Dublin_Knocklyon_CO_NOx_SO2_hr_MMM_daily, "../Gathered_Data/Dublin_Knocklyon_CO_NOx_SO2_hr_MMM_daily.csv")
#clean the environment
rm(list=ls())
#MARINO HOURLY DATA --------------------------------------------------------------------------------
#Reads the 2001 fixed-width text exports (Benzene, CO, NOx, SO2), merges them
#into one hourly table and saves it; too sparse for a min/max/mean summary.
setwd('..')
setwd("Marino/")
#2001 data — each read skips a trailing junk column (X4/X6) via col_skip()
#benzene
Benzene <- read_table2("Dublin_Marino_Benzene_TR_2001.txt", col_types = cols(X4 = col_skip()))
#CO
CO <- read_table2("Dublin_Marino_CO_TR_2001.txt", col_types = cols(X4 = col_skip()))
#NOx
NOx <- read_table2("Dublin_Marino_NOx_TR_2001.txt", col_types = cols(X6 = col_skip()))
#SO2
SO2 <- read_table2("Dublin_Marino_SO2_TR_2001.txt", col_types = cols(X4 = col_skip()))
#merge 2001 data on the shared Date/Time columns
Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr = Reduce(function(x, y) merge(x, y, all=TRUE), list(Benzene, CO, NOx, SO2))
#set date column as a date R will recognise (source files are day-first)
Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr$Date <-parse_date_time(Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr$Date, orders = c("dmy"))
#Combine the date and the time columns into a single POSIXct timestamp
Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr$Date <- with(Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr, as.POSIXct(paste(Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr$Date, Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr$Time), format="%Y-%m-%d %H"))
Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr$Time = NULL
#export the data, no data overlaps so 3 different files for this area
write_csv(Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr, "../Gathered_Data/Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr.csv")
#check for rows with complete data
#Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr[complete.cases(Dublin_Marino_Benzene_CO_NOx_SO2_2001_hr), ]
#too little data, not worth getting min/max/mean
#MARINO DAILY DATA --------------------------------------------------------------------------------
#PM2.5
#bind the PM2.5 files
PM25_files = dir(pattern = "*PM25")
PM25_list = lapply(PM25_files, read_xlsx)
PM25 = do.call(rbind, PM25_list)
#clean the data of repeated unit-label header rows
#NOTE(review): if no row matches, -grep empties the table (x[-integer(0),]
#selects zero rows) — worth guarding if these inputs ever change
PM25 = PM25[- grep("ug/m3", PM25$PM2.5),]
#PM10 data
#import txt data first
PM10_files = dir(pattern = "*PM10(.*)txt$")
PM10_list = lapply(PM10_files, read_table2)
PM10 = do.call(rbind, PM10_list)
#change the column name for binding
colnames(PM10)[2] = "PM10"
#sort the dates out for merging with lubridate, the dates have different
#formats; take whichever of mdy/dmy parses. NOTE: the locals `mdy` and `dmy`
#shadow the lubridate functions of the same name (harmless — R skips
#non-function objects when resolving a call — but easy to misread)
mdy = mdy(PM10$Date)
dmy = dmy(PM10$Date)
mdy[is.na(mdy)] = dmy[is.na(mdy)]
PM10$Date = mdy
#import xls files
PM10_files = dir(pattern = "*PM10(.*)xls$")
PM10_list = lapply(PM10_files, read_xls)
PM10_xls = do.call(rbind, PM10_list)
#bind the two data sets
PM10 = rbind(PM10,PM10_xls)
#remove hours from the PM2.5 data
PM25$Date = as.Date(PM25$Date,format='%Y-%m-%d %H')
#export the data, no data overlaps so different files for this area
write_csv(PM10, "../Gathered_Data/Dublin_Marino_PM10_daily.csv")
write_csv(PM25, "../Gathered_Data/Dublin_Marino_PM25_daily.csv")
#clean the environment
rm(list=ls())
#PHOENIX PARK DAILY --------------------------------------------------------------------------------
setwd('..')
setwd("PhoenixPark/")
#plain-text exports first
txt_files = dir(pattern = "*PM10(.*)txt$")
PM10 = do.call(rbind, lapply(txt_files, read_table2))
#the second column holds the measurement; give it the spreadsheet column name
colnames(PM10)[2] = "PM10"
#dates appear both month-first and day-first; keep whichever parse succeeds
parsed_mdy = mdy(PM10$Date)
parsed_dmy = dmy(PM10$Date)
parsed_mdy[is.na(parsed_mdy)] = parsed_dmy[is.na(parsed_mdy)]
PM10$Date = parsed_mdy
#spreadsheet exports (xls, then xlsx)
xls_part = do.call(rbind, lapply(dir(pattern = "*PM10(.*)xls$"), read_xls))
xlsx_part = do.call(rbind, lapply(dir(pattern = "*PM10(.*)xlsx$"), read_xlsx))
#stack all three sources
PM10 = rbind(PM10, xls_part, xlsx_part)
#order chronologically
PM10 = PM10[order(as.Date(PM10$Date, format="%Y/%m/%d")),]
#drop repeated header rows (unit label written into the PM10 column)
PM10 = PM10[- grep("ug/m3", PM10$PM10),]
#save the gathered data
write_csv(PM10, "../Gathered_Data/Dublin_PhoenixPark_PM10_daily.csv")
#clean the environment
rm(list=ls())
#RATHMINES MONTHLY --------------------------------------------------------------------------------
#MONTHLY DATA: As, B(a)P, Cd, Ni and Pb come as one xlsx per year with month
#labels only; each is read, cleaned and given a proper monthly Date column.
setwd('..')
setwd("Rathmines/")
#helper replacing five copy-pasted blocks. Reads all xlsx files matching
#`pattern`, drops repeated "ng/m3" header rows found in column `col`, attaches
#a monthly Date sequence starting at `start`, removes the old month-label
#column (X__1) and puts Date first — exactly what each original block did,
#with one fix: an empty grep no longer wipes the table (x[-integer(0), ]
#selects zero rows)
read_monthly_pollutant <- function(pattern, col, start) {
  dat <- do.call(rbind, lapply(dir(pattern = pattern), read_xlsx))
  hdr_rows <- grep("ng/m3", dat[[col]])
  if (length(hdr_rows) > 0) dat <- dat[-hdr_rows, ]
  dat$Date <- seq(as.Date(start), by = "month", length.out = nrow(dat))
  dat$X__1 <- NULL
  dat[, c(2, 1)]
}
#Arsenic, As (ng/m3, from 2009)
As = read_monthly_pollutant("As", "As", "2009/1/1")
#Benzo(a)pyrene, BaP (ng/m3, series starts 2010)
BaP = read_monthly_pollutant("BaP", "B(a)P", "2010/1/1")
#Cadmium, Cd (ng/m3, from 2009)
Cd = read_monthly_pollutant("Cd", "Cd", "2009/1/1")
#Nickel, Ni (ng/m3, from 2009)
Ni = read_monthly_pollutant("Ni", "Ni", "2009/1/1")
#Lead, Pb — xlsx only: the xls Pb files are daily and handled in the next section
Pb = read_monthly_pollutant("*Pb(.*)xlsx$", "Pb", "2009/1/1")
#merge monthly data on the shared Date column
Dublin_Rathmines_As_BaP_Cd_Ni_Pb_monthly = Reduce(function(x, y) merge(x, y, all=TRUE), list(As, BaP, Cd, Ni, Pb))
#change the date format to just year/month
Dublin_Rathmines_As_BaP_Cd_Ni_Pb_monthly$Date <- format(as.Date(Dublin_Rathmines_As_BaP_Cd_Ni_Pb_monthly$Date), "%Y-%m")
#save the gathered data
write_csv(Dublin_Rathmines_As_BaP_Cd_Ni_Pb_monthly, "../Gathered_Data/Dublin_Rathmines_As_BaP_Cd_Ni_Pb_monthly.csv")
#clean the environment (also removes the helper)
rm(list=ls())
#RATHMINES DAILY------------------------------------------------------------------
#Builds daily series for Pb, Benzene, Ethylbenzene, m/p- and o-Xylene, PM10,
#PM2.5 and Toluene, then saves two merged csvs (BTEX+Pb and PM10+PM25).
#NOTE(review): every `x[- grep(...), ]` below empties the table when the
#pattern is absent (x[-integer(0), ] selects zero rows) — fine for the current
#files, but worth guarding if inputs change.
#PB daily files
#import xls files- daily data (monthly Pb lives in the xlsx files, prior section)
Pb_files = dir(pattern = "*Pb(.*)xls$")
Pb_list = lapply(Pb_files, read_xls)
Pb = do.call(rbind, Pb_list)
#drop repeated unit-label header rows
Pb = Pb[- grep("ug/m3", Pb$Pb ),]
Pb$Date = as.Date(Pb$Date)
#Benzene hourly
Benzene_files = dir(pattern = "*Benzene.*hr")
Benzene_list = lapply(Benzene_files, read_xls)
Benzene_hr = do.call(rbind, Benzene_list)
#drop repeated unit-label header rows
Benzene_hr = Benzene_hr[- grep("ug/m3", Benzene_hr$Benzene ),]
Benzene_hr$Time = NULL
#calculate the daily average for the hourly dataset
Benzene_hr$Date = as.Date(Benzene_hr$Date)
Benzene_hr$Benzene = as.numeric(Benzene_hr$Benzene)
#formula-interface aggregate silently drops NA rows before averaging
Benzene_daily = aggregate(cbind(Benzene_hr$Benzene) ~ Date, Benzene_hr, mean)
#change the column name for binding
colnames(Benzene_daily)[2] = "Benzene"
#Benzene daily files xls
Benzene_files = dir(pattern = "*Benzene.*day(.*)xls$")
Benzene_list = lapply(Benzene_files, read_xls)
Benzene = do.call(rbind, Benzene_list)
#benzene files xlsx
Benzene_files = dir(pattern = "*Benzene.*day(.*)xlsx$")
Benzene_list = lapply(Benzene_files, read_xlsx)
Benzene_xlxs = do.call(rbind, Benzene_list)
#bind the converted hourly data and the xls and xlsx files
Benzene = rbind(Benzene_daily, Benzene, Benzene_xlxs)
#plot(Benzene)
#sort the data by date
Benzene = Benzene[order(as.Date(Benzene$Date, format="%Y/%m/%d")),]
#drop repeated unit-label header rows
Benzene = Benzene[- grep("ug/m3", Benzene$Benzene ),]
#Ethylbenzene
Ethylbenzene_files = dir(pattern = "*ethylbenzene")
Ethylbenzene_list = lapply(Ethylbenzene_files, read_xlsx)
#rows wouldn't bind due to lowercase 'e' in the header of the 2010 file; that
#was changed manually in Excel, so the source file differs from the EPA export
#bind the data
Ethylbenzene = do.call(rbind, Ethylbenzene_list)
#drop repeated unit-label header rows
Ethylbenzene = Ethylbenzene[- grep("ug/m3", Ethylbenzene$Ethylbenzene ),]
#MP Xylene
#import the ppb data and normalise the column names for binding
Xylene_files = dir(pattern = "*MP_xylene.*ppb(.*)xlsx$")
Xylene_list = lapply(Xylene_files, read_xlsx)
#NOTE: this local `colnames` shadows base::colnames until reassigned below
colnames = c("Date", "m,p xylene")
Xylene_list <- lapply(Xylene_list, setNames, colnames)
Xylene_ppb = do.call(rbind, Xylene_list)
#convert from ppb to ugm3 (combined xylene MW 106 / molar volume 22.41)
Xylene_ppb$`m,p xylene`= as.numeric(Xylene_ppb$`m,p xylene`)
Xylene_ppb$`m,p xylene` = Xylene_ppb$`m,p xylene` * (106/22.41)
#import ugm3 files
Xylene_files = dir(pattern = "mp_xylene.*ugm3(.*)xlsx$")
Xylene_list = lapply(Xylene_files, read_xlsx)
#rename columns for binding and then bind the files
Xylene_list <- lapply(Xylene_list, setNames, colnames)
mp_Xylene = do.call(rbind, Xylene_list)
#bind the ppb and ugm3 datasets
mp_Xylene = rbind(mp_Xylene, Xylene_ppb)
#drop repeated unit-label header rows
mp_Xylene = mp_Xylene[- grep("ug/m3", mp_Xylene$`m,p xylene` ),]
##sort the data by date
mp_Xylene = mp_Xylene[order(as.Date(mp_Xylene$Date, format="%Y/%m/%d")),]
#o_Xylene
#ppb file import and conversion to ugm3
oXylene_files = dir(pattern = "*o_xylene.*ppb")
oXylene_list = lapply(oXylene_files, read_xlsx)
oXylene_ppb = do.call(rbind, oXylene_list)
#convert from ppb to ugm3 (MW 106) after coercing strings to numeric
oXylene_ppb$`o-xylene` = as.numeric(oXylene_ppb$`o-xylene`) * (106/22.41)
#change column name for binding
colnames(oXylene_ppb)[2] = "oXylene"
#read ugm3 data in
oXylene_files = dir(pattern = "*o_xylene.*ugm3")
oXylene_list = lapply(oXylene_files, read_xlsx)
#change the header names so they are all the same for binding
colnames = c("Date", "oXylene")
oXylene_list <- lapply(oXylene_list, setNames, colnames)
oXylene = do.call(rbind, oXylene_list)
#bind the ppb and ugm3 datasets
oXylene = rbind(oXylene, oXylene_ppb)
#drop repeated unit-label header rows
oXylene = oXylene[- grep("ug/m3", oXylene$oXylene ),]
##sort the data by date
oXylene = oXylene[order(as.Date(oXylene$Date, format="%Y/%m/%d")),]
#PM10 files
#txt
PM10_files = dir(pattern = "*PM10.*(.*)txt$")
PM10_list = lapply(PM10_files, read_table2)
PM10_txt = do.call(rbind, PM10_list)
colnames(PM10_txt)[2] = "PM10"
#the txt files mix day-first and year-first dates; normalise both for binding
PM10_txt$Date = parse_date_time(PM10_txt$Date, c('dmy', 'ymd'))
#xls
PM10_files = dir(pattern = "*PM10.*(.*)xls$")
PM10_list = lapply(PM10_files, read_xls)
PM10_xls = do.call(rbind, PM10_list)
#xlsx
PM10_files = dir(pattern = "*PM10.*(.*)xlsx$")
PM10_list = lapply(PM10_files, read_xlsx)
PM10_xlsx = do.call(rbind, PM10_list)
#bind all data together
PM10 = rbind(PM10_txt, PM10_xls, PM10_xlsx)
#drop repeated unit-label header rows
PM10 = PM10[- grep("ug/m3", PM10$PM10 ),]
#convert 2003 file from hourly to daily data to fill in a gap in the data
PM10_2003_hr <- read_excel("Dublin_Rathmines_pm10_2003_hr.xls")
PM10_2003_hr$Hour = NULL
#drop repeated unit-label header rows
PM10_2003_hr = PM10_2003_hr[- grep("ug/m3", PM10_2003_hr$PM10 ),]
PM10_2003_hr$Date = as.Date(PM10_2003_hr$Date)
#compute daily averages
PM10_2003_hr$PM10 = as.numeric(PM10_2003_hr$PM10)
#count NAs in case they cause issues when calculating the daily average
sum(is.na(PM10_2003_hr$Date))
#remove rows with missing dates
PM10_2003_hr = PM10_2003_hr[!is.na(PM10_2003_hr$Date), ]
#calculate the daily average
PM10_2003_daily = aggregate(cbind(PM10_2003_hr$PM10) ~ Date, PM10_2003_hr, mean)
#combine converted hourly data with the rest of the daily data
colnames(PM10_2003_daily)[2] = "PM10"
PM10 = rbind(PM10, PM10_2003_daily)
#sort the data by date
PM10 = PM10[order(as.Date(PM10$Date, format="%Y/%m/%d")),]
#plot(PM10)
#PM25
#import PM25 files
PM25_files = dir(pattern = "*PM25.*(.*)xlsx$")
PM25_list = lapply(PM25_files, read_xlsx)
#rename columns for binding and then bind the files
colnames = c("Date", "PM25")
PM25_list <- lapply(PM25_list, setNames, colnames)
PM25 = do.call(rbind, PM25_list)
#Toluene
Toluene_files = dir(pattern = "*Toluene")
Toluene_list = lapply(Toluene_files, read_xlsx)
Toluene = do.call(rbind, Toluene_list)
#drop repeated unit-label header rows
Toluene = Toluene[- grep("ug/m3", Toluene$Toluene ),]
Benzene$Date = as.Date(Benzene$Date, format = "%Y-%m-%d")
#merge the daily data together (Benzene deliberately excluded — see TODO below)
Dublin_Rathmines_Benzene_Ethlybenzene_mpXylene_oXylene_Pb_Toluene_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(Ethylbenzene, mp_Xylene, oXylene, Pb, Toluene))
#Dublin_Rathmines_Benzene_Ethlybenzene_mpXylene_oXylene_Pb_Toluene_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(Benzene, Ethylbenzene, mp_Xylene, oXylene, Pb, Toluene))
#TODO
#something weird with benzene- can't work it out, come back!
#Dublin_Rathmines_Benzene_Ethlybenzene_mpXylene_oXylene_Pb_Toluene_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(Dublin_Rathmines_Benzene_Ethlybenzene_mpXylene_oXylene_Pb_Toluene_daily, Benzene))
#save the gathered data
write_csv(Dublin_Rathmines_Benzene_Ethlybenzene_mpXylene_oXylene_Pb_Toluene_daily,"../Gathered_Data/Dublin_Rathmines_Benzene_Ethlybenzene_mpXylene_oXylene_Pb_Toluene_daily.csv")
#merge the daily data together for PM
Dublin_Rathmines_PM10_PM25_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(PM10, PM25))
#drop repeated unit-label header rows
Dublin_Rathmines_PM10_PM25_daily = Dublin_Rathmines_PM10_PM25_daily[- grep("ug/m3", Dublin_Rathmines_PM10_PM25_daily$PM25 ),]
#remove hours from data
Dublin_Rathmines_PM10_PM25_daily$Date = as.Date(Dublin_Rathmines_PM10_PM25_daily$Date,format='%Y-%m-%d %H')
#save the gathered data
write_csv(Dublin_Rathmines_PM10_PM25_daily, "../Gathered_Data/Dublin_Rathmines_PM10_PM25_daily.csv")
#clean the environment
rm(list=ls())
#RATHMINES HOURLY --------------------------------------------------------------------
#NOx
#covert the ppb files to ugm3
#NOx text files
NOx_files = dir(pattern = "*NOx*(.*)txt")
NOx_list = lapply(NOx_files, read_table2)
NOx_txt = do.call(rbind, NOx_list)
#have to change Headers of the txt files so they match for binding with the xls and xlsx files
colnames(NOx_txt)[2] = "Time"
colnames(NOx_txt)[3] = "NOx"
colnames(NOx_txt)[4] = "NO"
colnames(NOx_txt)[5] = "NO2"
#change the date format for binding
NOx_txt$Date = parse_date_time(NOx_txt$Date, c('dmy', 'ymd'))
#Combine the date and the time columns
NOx_txt$Date <- with(NOx_txt, as.POSIXct(paste(NOx_txt$Date, NOx_txt$Time), format="%Y-%m-%d %H"))
NOx_txt$Time = NULL
#NOx xls files
NOx_files = dir(pattern = "*NOx*(.*)xls$")
NOx_list = lapply(NOx_files, read_xls)
#have to change Hour column to Time and have to change headers Nox to NOx for binding
NOx_list <- lapply(NOx_list, function(x) setNames(x, gsub("^Hour$", "Time", names(x))) )
NOx_list <- lapply(NOx_list, function(x) setNames(x, gsub("^Nox$", "NOx", names(x))) )
#bind the files
NOx_xls = do.call(rbind, NOx_list)
#Combine the date and the time columns
NOx_xls$Date <- with(NOx_xls, as.POSIXct(paste(NOx_xls$Date, NOx_xls$Time), format="%Y-%m-%d %H"))
NOx_xls$Time = NULL
#clean the data
NOx_xls = NOx_xls[- grep("ppb", NOx_xls$NO ),]
#import the ppb xlxs file
NOx_xlsx <- read_excel("Dublin_Rathmines_NOx_2010_ppb_hr.xlsx")
#Combine the date and the time columns
NOx_xlsx$Date <- with(NOx_xlsx, as.POSIXct(paste(NOx_xlsx$Date, NOx_xlsx$Time), format="%Y-%m-%d %H"))
NOx_xlsx$Time = NULL
#bind all the ppb data
NOx_ppb = rbind(NOx_xls, NOx_txt, NOx_xlsx)
#convert all the NO columns from strings to numerical values for calculations
NOx_ppb[, 2:4] <- sapply(NOx_ppb[, 2:4], as.numeric)
#convert ppb to ugm3. NB NOx additions dont add up from NO and NO2 like in the newer datsets
#maybe as a result of the results being rounded to nearest numbers here
NOx_ppb$NO = NOx_ppb$NO * (30/22.41)
NOx_ppb$NO2 = NOx_ppb$NO2 * (46/22.41)
NOx_ppb$NOx = NOx_ppb$NOx * (46/22.41)
#import NOx ugm3 files
NOx_files = dir(pattern = "*NOx.*ugm3")
NOx_list = lapply(NOx_files, read_xlsx)
NOx_xlsx = do.call(rbind, NOx_list)
#clean the data
NOx_xlsx = NOx_xlsx[- grep("ug/m3", NOx_xlsx$NO ),]
#combine date and time columes
NOx_xlsx$Date <- with(NOx_xlsx, as.POSIXct(paste(NOx_xlsx$Date, NOx_xlsx$Time), format="%Y-%m-%d %H"))
NOx_xlsx$Time = NULL
#bind all of the NOx data
NOx = rbind(NOx_xlsx, NOx_ppb)
#plot(NOx)
#sort the data by date
NOx = NOx[order(as.Date(NOx$Date, format="%Y/%m/%d")),]
#OZONE HOURLY DATA
#import Ozone txt files
O3_files = dir(pattern = "*O3(.*)txt$")
O3_list = lapply(O3_files, read_table2)
O3_txt = do.call(rbind, O3_list)
#rename the columns for binding
colnames(O3_txt)[3] = "ozone"
#change the date structure
O3_txt$Date = parse_date_time(O3_txt$Date, c('dmy', 'ymd'))
#import the xls O3 files
O3_files = dir(pattern = "*O3(.*)xls$")
O3_list = lapply(O3_files, read_xls)
#alter headers that have hour instead of time
O3_list <- lapply(O3_list, function(x) setNames(x, gsub("^Hour$", "Time", names(x))) )
#bind the files in the list
O3_xls = do.call(rbind, O3_list)
#import the xlsx O3 ppb files
O3_files = dir(pattern = "*O3.*ppb(.*)xlsx$")
O3_list = lapply(O3_files, read_xlsx)
O3_xlsx_ppb = do.call(rbind, O3_list)
#convert ppb to ugm3
O3_xlsx_ppb$ozone = as.numeric(O3_xlsx_ppb$ozone) * (48/22.41)
#import xlxs ugm3 ozone files
O3_files = dir(pattern = "*O3.*ugm3(.*)xlsx$")
O3_list = lapply(O3_files, read_xlsx)
O3_xlsx = do.call(rbind, O3_list)
#bind all the ozone files
O3 = rbind(O3_xlsx, O3_xlsx_ppb, O3_xls, O3_txt)
#sort the data by date
O3 = O3[order(as.Date(O3$Date, format="%Y/%m/%d")),]
#clean the data
O3 = O3[- grep("ug/m3", O3$ozone ),]
O3 = O3[- grep("mg/m3", O3$ozone ),]
#Combine the date and the time columns
O3$Date <- with(O3, as.POSIXct(paste(O3$Date, O3$Time), format="%Y-%m-%d %H"))
O3$Time = NULL
#Sulphur dioxide, SO2
#import the txt SO2 exports (dir() patterns are regexes; the leading "*" is inert)
SO2_files = dir(pattern = "*SO2(.*)txt$")
SO2_list = lapply(SO2_files, read_table2)
SO2_txt = do.call(rbind, SO2_list)
#rename the measurement column so every SO2 frame binds cleanly
colnames(SO2_txt)[3] = "SO2"
#dates appear in both day-month-year and year-month-day order; try both
SO2_txt$Date = parse_date_time(SO2_txt$Date, c('dmy', 'ymd'))
#import the xls SO2 files ("xls$" does not match ".xlsx" names)
SO2_files = dir(pattern = "*SO2(.*)xls$")
SO2_list = lapply(SO2_files, read_xls)
SO2_xls = do.call(rbind, SO2_list)
#rename the hour column to "Time" for binding
colnames(SO2_xls)[2] = "Time"
#import the xlsx SO2 files reported in ppb
SO2_files = dir(pattern = "*SO2.*ppb(.*)xlsx$")
SO2_list = lapply(SO2_files, read_xlsx)
SO2_xlsx_ppb = do.call(rbind, SO2_list)
#convert ppb to ug/m3: ppb * molecular weight (64 for SO2) / 22.41
SO2_xlsx_ppb$SO2 = as.numeric(SO2_xlsx_ppb$SO2) * (64/22.41)
#import the xlsx SO2 files already in ug/m3
SO2_files = dir(pattern = "*SO2.*ugm3(.*)xlsx$")
SO2_list = lapply(SO2_files, read_xlsx)
SO2_xlsx = do.call(rbind, SO2_list)
#stack every SO2 source into one frame
SO2 = rbind(SO2_txt, SO2_xls, SO2_xlsx_ppb, SO2_xlsx)
#drop repeated spreadsheet header rows carrying the unit label
#NOTE(review): x[-grep(...),] empties the frame when there are zero matches;
#safe only while at least one such header row is always present
SO2 = SO2[- grep("ug/m3", SO2$SO2 ),]
#sort chronologically
SO2 = SO2[order(as.Date(SO2$Date, format="%Y/%m/%d")),]
#mutate a copy (an atomic-vector error was seen when combining in place)
SO2atomicvector = SO2
#fuse the date and hour columns into a single POSIXct timestamp
SO2atomicvector$Date <- with(SO2atomicvector, as.POSIXct(paste(SO2atomicvector$Date, SO2atomicvector$Time), format="%Y-%m-%d %H"))
SO2atomicvector$Time = NULL
SO2 = SO2atomicvector
#remove NAs from the Date columns before merging: merge() pairs every NA key
#with every other NA key, generating thousands of spurious permutation rows
NOx = NOx[!is.na(NOx$Date), ]
SO2 = SO2[!is.na(SO2$Date), ]
O3 = O3[!is.na(O3$Date), ]
#merge the hourly Rathmines pollutant frames on their shared columns
Dublin_Rathmines_NOx_O3_SO2_hr = Reduce(function(x, y) merge(x, y, all=TRUE), list(NOx, O3, SO2))
#save the gathered data
write_csv(Dublin_Rathmines_NOx_O3_SO2_hr, "../Gathered_Data/Dublin_Rathmines_NOx_O3_SO2_hr.csv")
#calculate daily min/max/mean: coerce the measurement columns to numeric first
Dublin_Rathmines_NOx_O3_SO2_hr$NOx = as.numeric(Dublin_Rathmines_NOx_O3_SO2_hr$NOx)
Dublin_Rathmines_NOx_O3_SO2_hr$NO = as.numeric(Dublin_Rathmines_NOx_O3_SO2_hr$NO)
Dublin_Rathmines_NOx_O3_SO2_hr$NO2 = as.numeric(Dublin_Rathmines_NOx_O3_SO2_hr$NO2)
Dublin_Rathmines_NOx_O3_SO2_hr$ozone = as.numeric(Dublin_Rathmines_NOx_O3_SO2_hr$ozone)
Dublin_Rathmines_NOx_O3_SO2_hr$SO2 = as.numeric(Dublin_Rathmines_NOx_O3_SO2_hr$SO2)
#aggregate each pollutant by calendar day. na.rm=FALSE is spelled out (the
#shorthand F is a reassignable variable) so a day with any missing hour
#reports NA. Results are kept in daily_* variables instead of the previous
#mean/min/max names, which shadowed the base functions of the same name.
daily_mean = aggregate(Dublin_Rathmines_NOx_O3_SO2_hr[names(Dublin_Rathmines_NOx_O3_SO2_hr)!='Date'], list(hour=cut(Dublin_Rathmines_NOx_O3_SO2_hr$Date,'day')), mean, na.rm=FALSE)
#suffix the column names; "NOx" must be replaced before the end-anchored "NO$"
names(daily_mean) <- gsub("NOx", "NOx_Mean", names(daily_mean))
names(daily_mean) <- gsub("NO$", "NO_Mean", names(daily_mean))
names(daily_mean) <- gsub("NO2", "NO2_Mean", names(daily_mean))
names(daily_mean) <- gsub("ozone", "ozone_Mean", names(daily_mean))
names(daily_mean) <- gsub("SO2", "SO2_Mean", names(daily_mean))
daily_min = aggregate(Dublin_Rathmines_NOx_O3_SO2_hr[names(Dublin_Rathmines_NOx_O3_SO2_hr)!='Date'], list(hour=cut(Dublin_Rathmines_NOx_O3_SO2_hr$Date,'day')), min, na.rm=FALSE)
names(daily_min) <- gsub("NOx", "NOx_Min", names(daily_min))
names(daily_min) <- gsub("NO$", "NO_Min", names(daily_min))
names(daily_min) <- gsub("NO2", "NO2_Min", names(daily_min))
names(daily_min) <- gsub("ozone", "ozone_Min", names(daily_min))
names(daily_min) <- gsub("SO2", "SO2_Min", names(daily_min))
daily_max = aggregate(Dublin_Rathmines_NOx_O3_SO2_hr[names(Dublin_Rathmines_NOx_O3_SO2_hr)!='Date'], list(hour=cut(Dublin_Rathmines_NOx_O3_SO2_hr$Date,'day')), max, na.rm=FALSE)
names(daily_max) <- gsub("NOx", "NOx_Max", names(daily_max))
names(daily_max) <- gsub("NO$", "NO_Max", names(daily_max))
names(daily_max) <- gsub("NO2", "NO2_Max", names(daily_max))
names(daily_max) <- gsub("ozone", "ozone_Max", names(daily_max))
names(daily_max) <- gsub("SO2", "SO2_Max", names(daily_max))
#merge the three daily summaries into one frame
Dublin_Rathmines_NOx_O3_SO2_MMM_hr = Reduce(function(x, y) merge(x, y, all=TRUE), list(daily_mean,daily_min,daily_max))
#the aggregation key comes back named "hour"; rename it to Date
colnames(Dublin_Rathmines_NOx_O3_SO2_MMM_hr)[1] = "Date"
#truncate the timestamps to calendar dates
Dublin_Rathmines_NOx_O3_SO2_MMM_hr$Date = as.Date(Dublin_Rathmines_NOx_O3_SO2_MMM_hr$Date,format='%Y-%m-%d %H')
#save the converted hourly data
write_csv(Dublin_Rathmines_NOx_O3_SO2_MMM_hr, "../Gathered_Data/Dublin_Rathmines_NOx_O3_SO2_MMM_hr_MMM_daily.csv")
#clear the workspace before the next station's section
rm(list=ls())
#RINGSEND DAILY --------------------------------------------------------------------------------
#move from the Rathmines folder into the Ringsend data folder
setwd('..')
setwd("Ringsend/")
#BENZENE
#import every benzene workbook and stack them into one frame
benzene_files = dir(pattern = "*Benzene")
benzene_list = lapply(benzene_files, read_xlsx)
Benzene = do.call(rbind, benzene_list)
#drop repeated spreadsheet header rows carrying the unit label
#NOTE(review): x[-grep(...),] empties the frame when there are zero matches
Benzene = Benzene[- grep("ug/m3", Benzene$Benzene ),]
#PM10
#import the xls PM10 files ("xls$" does not match ".xlsx" names)
PM10_files = dir(pattern = "PM10.*xls$")
PM10_list = lapply(PM10_files, read_xls)
PM10_xls = do.call(rbind, PM10_list)
#import the xlsx PM10 files
PM10_files = dir(pattern = "PM10.*xlsx$")
PM10_list = lapply(PM10_files, read_xlsx)
PM10_xlsx = do.call(rbind, PM10_list)
#stack both formats, then drop the unit-label header rows
PM10 = rbind(PM10_xls, PM10_xlsx)
PM10 = PM10[- grep("ug/m3", PM10$PM10),]
#Toluene
#import the xlsx toluene files
Toluene_files = dir(pattern = "Toluene.*xlsx$")
Toluene_list = lapply(Toluene_files, read_xlsx)
Toluene = do.call(rbind, Toluene_list)
#drop the unit-label header rows
Toluene = Toluene[- grep("ug/m3", Toluene$Toluene),]
#merge the daily Ringsend pollutant frames on their shared columns
Dublin_Ringsend_Benzene_PM10_Toluene_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(Benzene, PM10, Toluene))
#truncate timestamps to calendar dates
Dublin_Ringsend_Benzene_PM10_Toluene_daily$Date = as.Date(Dublin_Ringsend_Benzene_PM10_Toluene_daily$Date,format='%Y-%m-%d %H')
#save the gathered data
write_csv(Dublin_Ringsend_Benzene_PM10_Toluene_daily, "../Gathered_Data/Dublin_Ringsend_Benzene_PM10_Toluene_daily.csv")
#clear the workspace before the next section
rm(list=ls())
#RINGSEND HOURLY --------------------------------------------------------------------------------
#CARBON MONOXIDE, CO
#import the ppm CO workbooks (name may have CO before or after "ppm")
CO_files = dir(pattern = "CO.*ppm|ppm.*CO")
CO_list = lapply(CO_files, read_xlsx)
CO_ppm = do.call(rbind, CO_list)
#convert ppm to mg/m3: ppm * molecular weight (28 for CO) / 22.41; the
#column arrives as text so coerce it to numeric first
CO_ppm$CO = as.numeric(CO_ppm$CO)
CO_ppm$CO = CO_ppm$CO * (28/22.41)
#import the CO data already in mg/m3
#NOTE(review): the second alternative "ppm.*mgm3" looks like a typo for
#"mgm3.*CO" -- verify against the actual filenames in this directory
CO_files = dir(pattern = "CO.*mgm3|ppm.*mgm3")
CO_list = lapply(CO_files, read_xlsx)
CO = do.call(rbind, CO_list)
#stack the converted ppm data under the mg/m3 data
CO_final = rbind(CO,CO_ppm)
#fuse the date and hour columns into one POSIXct timestamp; header rows fail
#to parse here and become NA dates before being dropped below
CO_final$Date <- with(CO_final, as.POSIXct(paste(CO_final$Date , CO_final$Time), format="%Y-%m-%d %H"))
CO_final$Time = NULL
#drop repeated spreadsheet header rows carrying the unit label
#NOTE(review): x[-grep(...),] empties the frame when there are zero matches
CO_final = CO_final[- grep("mg/m3", CO_final$CO),]
#keep the conventional name used by the merge further down
CO = CO_final
#NOx
#import the ppb NOx workbooks
ppb_NOx = dir(pattern = "NOx.*ppb|ppb.*NOx")
NOx_list = lapply(ppb_NOx, read_xlsx)
NOx_ppb = do.call(rbind, NOx_list)
#drop repeated header rows carrying the "ppb" unit label
NOx_ppb = NOx_ppb[- grep("ppb", NOx_ppb$NO2),]
#coerce the measurement columns (assumed NOx/NO/NO2 in positions 3:5 --
#confirm against the sheets) from strings to numeric
NOx_ppb[, 3:5] <- sapply(NOx_ppb[, 3:5], as.numeric)
#is.numeric(NOx_ppb$NO2)
#convert ppb to ug/m3: ppb * molecular weight / 22.41
#(NOx uses 46, i.e. reported as NO2-equivalent -- confirm with the EPA spec)
NOx_ppb$NOx = NOx_ppb$NOx * (46/22.41)
NOx_ppb$NO = NOx_ppb$NO * (30/22.41)
NOx_ppb$NO2 = NOx_ppb$NO2 * (46/22.41)
#import the ug/m3 NOx workbooks
NOx_files = dir(pattern = "NOx.*ugm3|ugm3.*NOx")
NOx_list = lapply(NOx_files, read_xlsx)
NOx_ugm3 = do.call(rbind, NOx_list)
#drop repeated header rows carrying the unit label
NOx_ugm3 = NOx_ugm3[- grep("ug/m3", NOx_ugm3$NOx),]
#stack the converted ppb data on top of the ug/m3 data
NOx_final = rbind(NOx_ppb, NOx_ugm3)
#fuse the date and hour columns into a single POSIXct timestamp
NOx_final$Date <- with(NOx_final, as.POSIXct(paste(NOx_final$Date , NOx_final$Time), format="%Y-%m-%d %H"))
NOx_final$Time = NULL
#keep the conventional name used by the merge further down
NOx = NOx_final
#SO2
#locate the ppb-unit SO2 workbooks (SO2 may appear before or after "ppb")
ppb_paths <- dir(pattern = "SO2.*ppb|ppb.*SO2")
so2_ppb <- do.call(rbind, lapply(ppb_paths, read_xlsx))
#convert ppb to ug/m3 (molecular weight 64 for SO2 over 22.41)
so2_ppb$SO2 <- as.numeric(so2_ppb$SO2) * (64/22.41)
#locate the ug/m3-unit SO2 workbooks
ugm3_paths <- dir(pattern = "SO2.*ugm3|ugm3.*SO2")
so2_ugm3 <- do.call(rbind, lapply(ugm3_paths, read_xlsx))
#stack both unit variants, ug/m3 rows first
SO2 <- rbind(so2_ugm3, so2_ppb)
#strip stray spreadsheet header rows that carry the unit label
SO2 <- SO2[-grep("ug/m3", SO2$SO2), ]
#collapse the separate date and hour columns into one POSIXct timestamp
SO2$Date <- as.POSIXct(paste(SO2$Date, SO2$Time), format="%Y-%m-%d %H")
SO2$Time <- NULL
#merge the hourly Ringsend pollutant frames on their shared columns
Dublin_Ringsend_CO_NOx_SO2_hr = Reduce(function(x, y) merge(x, y, all=TRUE), list(CO, NOx, SO2))
#drop any leftover Time column from the merge
Dublin_Ringsend_CO_NOx_SO2_hr$Time = NULL
#save the gathered data
write_csv(Dublin_Ringsend_CO_NOx_SO2_hr, "../Gathered_Data/Dublin_Ringsend_CO_NOx_SO2_hr.csv")
#calculate daily min/max/mean: coerce the measurement columns to numeric first
Dublin_Ringsend_CO_NOx_SO2_hr$NOx = as.numeric(Dublin_Ringsend_CO_NOx_SO2_hr$NOx)
Dublin_Ringsend_CO_NOx_SO2_hr$NO = as.numeric(Dublin_Ringsend_CO_NOx_SO2_hr$NO)
Dublin_Ringsend_CO_NOx_SO2_hr$NO2 = as.numeric(Dublin_Ringsend_CO_NOx_SO2_hr$NO2)
Dublin_Ringsend_CO_NOx_SO2_hr$CO = as.numeric(Dublin_Ringsend_CO_NOx_SO2_hr$CO)
Dublin_Ringsend_CO_NOx_SO2_hr$SO2 = as.numeric(Dublin_Ringsend_CO_NOx_SO2_hr$SO2)
#aggregate each pollutant by calendar day. na.rm=FALSE is spelled out (the
#shorthand F is a reassignable variable) so a day with any missing hour
#reports NA. Results are kept in daily_* variables instead of the previous
#mean/min/max names, which shadowed the base functions of the same name.
daily_mean = aggregate(Dublin_Ringsend_CO_NOx_SO2_hr[names(Dublin_Ringsend_CO_NOx_SO2_hr)!='Date'], list(hour=cut(Dublin_Ringsend_CO_NOx_SO2_hr$Date,'day')), mean, na.rm=FALSE)
#suffix the column names; "NOx" must be replaced before the end-anchored "NO$"
names(daily_mean) <- gsub("NOx", "NOx_Mean", names(daily_mean))
names(daily_mean) <- gsub("NO$", "NO_Mean", names(daily_mean))
names(daily_mean) <- gsub("NO2", "NO2_Mean", names(daily_mean))
names(daily_mean) <- gsub("CO", "CO_Mean", names(daily_mean))
names(daily_mean) <- gsub("SO2", "SO2_Mean", names(daily_mean))
daily_min = aggregate(Dublin_Ringsend_CO_NOx_SO2_hr[names(Dublin_Ringsend_CO_NOx_SO2_hr)!='Date'], list(hour=cut(Dublin_Ringsend_CO_NOx_SO2_hr$Date,'day')), min, na.rm=FALSE)
names(daily_min) <- gsub("NOx", "NOx_Min", names(daily_min))
names(daily_min) <- gsub("NO$", "NO_Min", names(daily_min))
names(daily_min) <- gsub("NO2", "NO2_Min", names(daily_min))
names(daily_min) <- gsub("CO", "CO_Min", names(daily_min))
names(daily_min) <- gsub("SO2", "SO2_Min", names(daily_min))
daily_max = aggregate(Dublin_Ringsend_CO_NOx_SO2_hr[names(Dublin_Ringsend_CO_NOx_SO2_hr)!='Date'], list(hour=cut(Dublin_Ringsend_CO_NOx_SO2_hr$Date,'day')), max, na.rm=FALSE)
names(daily_max) <- gsub("NOx", "NOx_Max", names(daily_max))
names(daily_max) <- gsub("NO$", "NO_Max", names(daily_max))
names(daily_max) <- gsub("NO2", "NO2_Max", names(daily_max))
names(daily_max) <- gsub("CO", "CO_Max", names(daily_max))
names(daily_max) <- gsub("SO2", "SO2_Max", names(daily_max))
#merge the three daily summaries into one frame
Dublin_Ringsend_CO_NOx_SO2_MMM_hr = Reduce(function(x, y) merge(x, y, all=TRUE), list(daily_mean,daily_min,daily_max))
#the aggregation key comes back named "hour"; rename it to Date
colnames(Dublin_Ringsend_CO_NOx_SO2_MMM_hr)[1] = "Date"
#truncate the timestamps to calendar dates
Dublin_Ringsend_CO_NOx_SO2_MMM_hr$Date = as.Date(Dublin_Ringsend_CO_NOx_SO2_MMM_hr$Date,format='%Y-%m-%d %H')
#save the converted hourly data
write_csv(Dublin_Ringsend_CO_NOx_SO2_MMM_hr, "../Gathered_Data/Dublin_Ringsend_CO_NOx_SO2_MMM_hr_MMM_daily.csv")
#clear the workspace before the next station's section
rm(list=ls())
#ROSEMOUNT MONTHLY--------------------------------------------------------------------------------
#move from the Ringsend folder into the Rosemount data folder
setwd('..')
setwd("Rosemount/")
#all-metals deposition summary for 2012
Dublin_Rosemount_AllMetalDeposition_2012_ngm3_month <- read_excel("Dublin_Rosemount_AllMetalDeposition_2012_ngm3_month.xlsx")
#As
#Arsenic: import every workbook whose name contains "As" and stack them
#NOTE(review): the pattern "As" matches ANY filename containing those two
#letters -- verify the directory only holds the intended files
As_files = dir(pattern = "As")
As_list = lapply(As_files, read_xlsx)
As = do.call(rbind, As_list)
#drop repeated header rows (two unit spellings appear across the workbooks)
#NOTE(review): x[-grep(...),] empties the frame when there are zero matches
As = As[- grep("ug m-3 day-1", As$As),]
As = As[- grep("ug m-2 day-1", As$As),]
#build a monthly Date column; the +12 covers the year missing from the data,
#which is filtered back out just below
Dates = as.data.frame(seq(as.Date("2010/1/1"), by = "month", length.out = nrow(As) + 12))
colnames(Dates)[1] = "Date"
#there is no 2011 data, so drop the 2011 months from the sequence
Date1 = as.Date("2011-12-01")
Date2 = as.Date("2011-01-01")
Dates = filter(Dates, Dates$Date > Date1 | Dates$Date < Date2)
#attach the dates to the As rows (relies on the rows being in month order)
As = cbind(As, Dates)
#drop the old month column and put Date first
#NOTE(review): "X__1" is the legacy readxl name for an unnamed first column;
#newer readxl emits "...1" instead -- confirm which version produced these
As$X__1 = NULL
As = As[,c(2,1)]
#Cadmium, Cd: import and clean the header rows
Cd_files = dir(pattern = "Cd")
Cd_list = lapply(Cd_files, read_xlsx)
Cd = do.call(rbind, Cd_list)
Cd = Cd[- grep("ug m-3 day-1", Cd$Cd),]
Cd = Cd[- grep("ug m-2 day-1", Cd$Cd),]
#same months as As, so reuse the Dates column built above
Cd = cbind(Cd, Dates)
#drop the old month column and put Date first
Cd$X__1 = NULL
Cd = Cd[,c(2,1)]
#Hg
#Mercury: import and stack the workbooks
Hg_files = dir(pattern = "Hg")
Hg_list = lapply(Hg_files, read_xlsx)
Hg = do.call(rbind, Hg_list)
#drop the header rows
Hg = Hg[- grep("ug m-2 day-1", Hg$Hg),]
#Hg starts in 2012, so it gets its own monthly date sequence
Hg$Date = seq(as.Date("2012/1/1"), by = "month", length.out = nrow(Hg))
#drop the old month column and put Date first
Hg$X__1 = NULL
Hg = Hg[,c(2,1)]
#Ni
#Nickel: import the workbooks and remove the header rows
Ni_files = dir(pattern = "Ni")
Ni_list = lapply(Ni_files, read_xlsx)
Ni = do.call(rbind, Ni_list)
#one header variant is removed by pattern; rows 13 and 26 are removed by
#position because the pattern search did not match them
#NOTE(review): hard-coded row numbers break if the input files ever change
Ni = Ni[- grep("ug m-3 day-1", Ni$Ni),]
Ni = Ni[-c(13, 26), ]
#same months as As, so reuse the Dates column built above
Ni = cbind(Ni, Dates)
#drop the old month column and put Date first
Ni$X__1 = NULL
Ni = Ni[,c(2,1)]
#Pb
#Lead: import the workbooks and remove the header rows
Pb_files = dir(pattern = "Pb")
Pb_list = lapply(Pb_files, read_xlsx)
Pb = do.call(rbind, Pb_list)
Pb = Pb[- grep("ug m-3 day-1", Pb$Pb),]
Pb = Pb[- grep("ug m-2 day-1", Pb$Pb),]
#same months as As, so reuse the Dates column built above
Pb = cbind(Pb, Dates)
#drop the old month column and put Date first
Pb$X__1 = NULL
Pb = Pb[,c(2,1)]
#merge the monthly Rosemount metal frames on the Date column
Dublin_Rosemount_As_Cd_Hg_Ni_Pb = Reduce(function(x, y) merge(x, y, all=TRUE), list(As,Cd,Hg,Ni,Pb))
#keep only year-month in the Date column
Dublin_Rosemount_As_Cd_Hg_Ni_Pb$Date = format(Dublin_Rosemount_As_Cd_Hg_Ni_Pb$Date, format="%Y-%m")
#save the gathered data
write_csv(Dublin_Rosemount_As_Cd_Hg_Ni_Pb, "../Gathered_Data/Dublin_Rosemount_As_Cd_Hg_Ni_Pb_monthly.csv")
#clear the workspace before the next station's section
rm(list=ls())
#ST. ANNES PARK HOURLY --------------------------------------------------------------------------------
#move from the Rosemount folder into the St Annes Park data folder
setwd('..')
setwd("StAnnesPark/")
#HOURLY NOx
#import and stack the ug/m3 NOx workbooks
NOx_files = dir(pattern = "NOx.*ugm3|ugm3.*NOx")
NOx_list = lapply(NOx_files, read_xlsx)
NOx_final = do.call(rbind, NOx_list)
#drop repeated spreadsheet header rows carrying the unit label
#NOTE(review): x[-grep(...),] empties the frame when there are zero matches
NOx_final = NOx_final[- grep("ug/m3", NOx_final$NOx),]
#fuse the date and hour columns into a single POSIXct timestamp to help with
#graphing and merging
NOx_final$Date <- with(NOx_final, as.POSIXct(paste(NOx_final$Date, NOx_final$Time), format="%Y-%m-%d %H"))
NOx_final$Time = NULL
NOx = NOx_final
#save the gathered data
write_csv(NOx, "../Gathered_Data/Dublin_StAnnesPark_NOx_hr.csv")
#Convert to daily values
#coerce the measurement columns from text to numeric
NOx$NOx = as.numeric(NOx$NOx)
NOx$NO = as.numeric(NOx$NO)
NOx$NO2 = as.numeric(NOx$NO2)
#aggregate by calendar day. na.rm=FALSE is spelled out (the shorthand F is a
#reassignable variable) so a day with any missing hour reports NA. Results
#are kept in daily_* variables instead of the previous mean/min/max names,
#which shadowed the base functions of the same name.
daily_mean = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), mean, na.rm=FALSE)
#suffix the column names; "NOx" must be replaced before the end-anchored "NO$"
names(daily_mean) <- gsub("NOx", "NOx_Mean", names(daily_mean))
names(daily_mean) <- gsub("NO$", "NO_Mean", names(daily_mean))
names(daily_mean) <- gsub("NO2", "NO2_Mean", names(daily_mean))
daily_min = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), min, na.rm=FALSE)
names(daily_min) <- gsub("NOx", "NOx_Min", names(daily_min))
names(daily_min) <- gsub("NO$", "NO_Min", names(daily_min))
names(daily_min) <- gsub("NO2", "NO2_Min", names(daily_min))
daily_max = aggregate(NOx[names(NOx)!='Date'], list(hour=cut(NOx$Date,'day')), max, na.rm=FALSE)
names(daily_max) <- gsub("NOx", "NOx_Max", names(daily_max))
names(daily_max) <- gsub("NO$", "NO_Max", names(daily_max))
names(daily_max) <- gsub("NO2", "NO2_Max", names(daily_max))
#merge the three daily summaries into one frame
Dublin_StAnnesPark_NOx_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(daily_mean,daily_min,daily_max))
#the aggregation key comes back named "hour"; rename it to Date
colnames(Dublin_StAnnesPark_NOx_MMM_daily)[1] = "Date"
#truncate the timestamps to calendar dates
Dublin_StAnnesPark_NOx_MMM_daily$Date = as.Date(Dublin_StAnnesPark_NOx_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted hourly data
write_csv(Dublin_StAnnesPark_NOx_MMM_daily, "../Gathered_Data/Dublin_StAnnesPark_NOx_MMM_daily.csv")
#clear the workspace before the next section
rm(list=ls())
#ST. ANNES PARK DAILY --------------------------------------------------------------------------------
#PM10
#read every PM10 .xlsx workbook in the current directory and stack them
pm10_paths <- dir(pattern = "PM10.*xlsx$")
PM10 <- do.call(rbind, lapply(pm10_paths, read_xlsx))
#strip stray spreadsheet header rows carrying the unit label
PM10 <- PM10[-grep("ug/m3", PM10$PM10), ]
#truncate timestamps to calendar dates
PM10$Date <- as.Date(PM10$Date, format='%Y-%m-%d %H')
#persist the gathered daily data
write_csv(PM10, "../Gathered_Data/Dublin_StAnnesPark_PM10_daily.csv")
#clear the workspace before the next station's section
rm(list=ls())
#SWORDS HOURLY ------------------------------------------------------------------------------------
#move from the St Annes Park folder into the Swords data folder
setwd('..')
setwd("Swords/")
#HOURLY NOx
#import and stack every NOx workbook (dir() patterns are regexes; the
#leading "*" is inert)
NOx_files = dir(pattern = "*NOx")
NOx_list = lapply(NOx_files, read_xlsx)
NOx_final = do.call(rbind, NOx_list)
#drop repeated spreadsheet header rows carrying the unit label
#NOTE(review): x[-grep(...),] empties the frame when there are zero matches
NOx_final = NOx_final[- grep("ug/m3", NOx_final$NO2),]
#fuse the date and hour columns into a single POSIXct timestamp
NOx_final$Date <- with(NOx_final, as.POSIXct(paste(NOx_final$Date, NOx_final$Time), format="%Y-%m-%d %H"))
NOx_final$Time = NULL
NOx = NOx_final
#Ozone
#locate the ppb-unit ozone workbooks ("O3" may precede or follow "ppb")
O3_ppb_files = dir(pattern = "O3.*ppb|ppb.*O3")
O3_ppb_list = lapply(O3_ppb_files, read_xlsx)
O3_ppb = do.call(rbind, O3_ppb_list)
#drop repeated header rows carrying the "ppb" unit label. !grepl() keeps the
#frame intact when nothing matches, whereas x[-grep(...),] deletes EVERY row
#when there are zero matches (-integer(0) selects none)
O3_ppb = O3_ppb[!grepl("ppb", O3_ppb$ozone), ]
#coerce the ozone column (position 3) from text to numeric for the conversion
O3_ppb[, 3] <- sapply(O3_ppb[,3], as.numeric)
#convert ppb to ug/m3: ppb * molecular weight (48 for O3) / 22.41
O3_ppb$ozone = O3_ppb$ozone * (48/22.41)
#locate the ug/m3-unit workbooks. The second alternative was "ugm3*O3",
#where "3*" repeats the literal "3" instead of matching any characters;
#fixed to "ugm3.*O3" so unit-first filenames are matched as intended
O3_ugm3_files = dir(pattern = "O3.*ugm3|ugm3.*O3")
O3_ugm3_list = lapply(O3_ugm3_files, read_xlsx)
O3_ugm3 = do.call(rbind, O3_ugm3_list)
#drop the ug/m3 header rows
O3_ugm3 = O3_ugm3[!grepl("ug/m3", O3_ugm3$ozone), ]
#stack the converted ppb data on top of the ug/m3 data
O3_final = rbind(O3_ppb, O3_ugm3)
#fuse the date and hour columns into a single POSIXct timestamp
O3_final$Date <- with(O3_final, as.POSIXct(paste(O3_final$Date, O3_final$Time), format="%Y-%m-%d %H"))
O3_final$Time = NULL
#plot(O3_final, type = "l")
O3 = O3_final
#merge the hourly Swords pollutant frames on their shared columns
Dublin_Swords_NOx_Ozone_hr = Reduce(function(x, y) merge(x, y, all=TRUE), list(NOx, O3))
#save the gathered data
write_csv(Dublin_Swords_NOx_Ozone_hr, "../Gathered_Data/Dublin_Swords_NOx_Ozone_hr.csv")
#Convert to daily values: coerce the measurement columns to numeric first
Dublin_Swords_NOx_Ozone_hr$NOx = as.numeric(Dublin_Swords_NOx_Ozone_hr$NOx)
Dublin_Swords_NOx_Ozone_hr$NO = as.numeric(Dublin_Swords_NOx_Ozone_hr$NO)
Dublin_Swords_NOx_Ozone_hr$NO2 = as.numeric(Dublin_Swords_NOx_Ozone_hr$NO2)
Dublin_Swords_NOx_Ozone_hr$ozone = as.numeric(Dublin_Swords_NOx_Ozone_hr$ozone)
#aggregate each pollutant by calendar day. na.rm=FALSE is spelled out (the
#shorthand F is a reassignable variable) so a day with any missing hour
#reports NA. Results are kept in daily_* variables instead of the previous
#mean/min/max names, which shadowed the base functions of the same name.
daily_mean = aggregate(Dublin_Swords_NOx_Ozone_hr[names(Dublin_Swords_NOx_Ozone_hr)!='Date'], list(hour=cut(Dublin_Swords_NOx_Ozone_hr$Date,'day')), mean, na.rm=FALSE)
#suffix the column names; "NOx" must be replaced before the end-anchored "NO$"
names(daily_mean) <- gsub("NOx", "NOx_Mean", names(daily_mean))
names(daily_mean) <- gsub("NO$", "NO_Mean", names(daily_mean))
names(daily_mean) <- gsub("NO2", "NO2_Mean", names(daily_mean))
names(daily_mean) <- gsub("ozone", "ozone_Mean", names(daily_mean))
daily_min = aggregate(Dublin_Swords_NOx_Ozone_hr[names(Dublin_Swords_NOx_Ozone_hr)!='Date'], list(hour=cut(Dublin_Swords_NOx_Ozone_hr$Date,'day')), min, na.rm=FALSE)
names(daily_min) <- gsub("NOx", "NOx_Min", names(daily_min))
names(daily_min) <- gsub("NO$", "NO_Min", names(daily_min))
names(daily_min) <- gsub("NO2", "NO2_Min", names(daily_min))
names(daily_min) <- gsub("ozone", "ozone_Min", names(daily_min))
daily_max = aggregate(Dublin_Swords_NOx_Ozone_hr[names(Dublin_Swords_NOx_Ozone_hr)!='Date'], list(hour=cut(Dublin_Swords_NOx_Ozone_hr$Date,'day')), max, na.rm=FALSE)
names(daily_max) <- gsub("NOx", "NOx_Max", names(daily_max))
names(daily_max) <- gsub("NO$", "NO_Max", names(daily_max))
names(daily_max) <- gsub("NO2", "NO2_Max", names(daily_max))
names(daily_max) <- gsub("ozone", "ozone_Max", names(daily_max))
#merge the three daily summaries into one frame
Dublin_Swords_NOx_Ozone_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(daily_mean,daily_min,daily_max))
#the aggregation key comes back named "hour"; rename it to Date
colnames(Dublin_Swords_NOx_Ozone_hr_MMM_daily)[1] = "Date"
#truncate the timestamps to calendar dates
Dublin_Swords_NOx_Ozone_hr_MMM_daily$Date = as.Date(Dublin_Swords_NOx_Ozone_hr_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted hourly data
write_csv(Dublin_Swords_NOx_Ozone_hr_MMM_daily, "../Gathered_Data/Dublin_Swords_NOx_Ozone_hr_MMM_daily.csv")
#clear the workspace before the next station's section
rm(list=ls())
#TALLAGHT DAILY ------------------------------------------------------------------------------------
#move from the Swords folder into the Tallaght data folder
setwd('..')
setwd("Tallaght/")
#DAILY PM10
#import the xls PM10 files ("xls$" does not match ".xlsx" names)
PM10_files = dir(pattern = "PM10.*xls$")
PM10_list = lapply(PM10_files, read_xls)
PM10_xls = do.call(rbind, PM10_list)
#import the xlsx PM10 files
PM10_files = dir(pattern = "PM10.*xlsx")
PM10_list = lapply(PM10_files, read_xlsx)
PM10_xlsx = do.call(rbind, PM10_list)
#stack both formats, then drop the unit-label header rows
#NOTE(review): x[-grep(...),] empties the frame when there are zero matches
PM10 = rbind(PM10_xls, PM10_xlsx)
PM10 = PM10[- grep("ug/m3", PM10$PM10),]
#truncate timestamps to calendar dates
PM10$Date = as.Date(PM10$Date,format='%Y-%m-%d %H')
#save the gathered data
write_csv(PM10, "../Gathered_Data/Dublin_Tallaght_PM10_daily.csv")
#TALLAGHT HOURLY ------------------------------------------------------------------------------------
#SO2
#locate the ppb-unit SO2 workbooks ("SO2" may precede or follow "ppb")
SO2_ppb_files = dir(pattern = "SO2.*ppb|ppb.*SO2")
SO2_ppb_list = lapply(SO2_ppb_files, read_xlsx)
SO2_ppb = do.call(rbind, SO2_ppb_list)
#drop repeated header rows carrying the "ppb" unit label. !grepl() keeps the
#frame intact when nothing matches, whereas x[-grep(...),] deletes EVERY row
#when there are zero matches (-integer(0) selects none)
SO2_ppb = SO2_ppb[!grepl("ppb", SO2_ppb$SO2), ]
#convert ppb to ug/m3: ppb * molecular weight (64 for SO2) / 22.41
SO2_ppb$SO2 = as.numeric(SO2_ppb$SO2) * (64/22.41)
#locate the ug/m3-unit workbooks. The second alternative was "ugm3*SO2",
#where "3*" repeats the literal "3" instead of matching any characters;
#fixed to "ugm3.*SO2" so unit-first filenames are matched as intended
SO2_files = dir(pattern = "SO2.*ugm3|ugm3.*SO2")
SO2_list = lapply(SO2_files, read_xlsx)
SO2_ugm3 = do.call(rbind, SO2_list)
#stack the converted ppb data on top of the ug/m3 data
SO2 = rbind(SO2_ppb, SO2_ugm3)
#drop any remaining ug/m3 header rows
SO2 = SO2[!grepl("ug/m3", SO2$SO2), ]
#save the gathered data
write_csv(SO2, "../Gathered_Data/Dublin_Tallaght_SO2_hr.csv")
#mutate a copy (an atomic-vector error was seen when combining in place)
SO2_ave = SO2
#fuse the date and hour columns into a single POSIXct timestamp
SO2_ave$Date <- with(SO2_ave, as.POSIXct(paste(SO2_ave$Date, SO2_ave$Time), format="%Y-%m-%d %H"))
SO2_ave$Time = NULL
SO2 = SO2_ave
#Convert to daily values
#coerce the SO2 column from text to numeric
SO2$SO2 = as.numeric(SO2$SO2)
#aggregate by calendar day. na.rm=FALSE is spelled out (the shorthand F is a
#reassignable variable) so a day with any missing hour reports NA. Results
#are kept in daily_* variables instead of the previous mean/min/max names,
#which shadowed the base functions of the same name.
daily_mean = aggregate(SO2[names(SO2)!='Date'], list(hour=cut(SO2$Date,'day')), mean, na.rm=FALSE)
names(daily_mean) <- gsub("SO2", "SO2_Mean", names(daily_mean))
daily_min = aggregate(SO2[names(SO2)!='Date'], list(hour=cut(SO2$Date,'day')), min, na.rm=FALSE)
names(daily_min) <- gsub("SO2", "SO2_Min", names(daily_min))
daily_max = aggregate(SO2[names(SO2)!='Date'], list(hour=cut(SO2$Date,'day')), max, na.rm=FALSE)
names(daily_max) <- gsub("SO2", "SO2_Max", names(daily_max))
#merge the three daily summaries into one frame
Dublin_Tallaght_SO2_hr_MMM_daily = Reduce(function(x, y) merge(x, y, all=TRUE), list(daily_mean,daily_min,daily_max))
#the aggregation key comes back named "hour"; rename it to Date
colnames(Dublin_Tallaght_SO2_hr_MMM_daily)[1] = "Date"
#truncate the timestamps to calendar dates
Dublin_Tallaght_SO2_hr_MMM_daily$Date = as.Date(Dublin_Tallaght_SO2_hr_MMM_daily$Date,format='%Y-%m-%d %H')
#save the converted hourly data
write_csv(Dublin_Tallaght_SO2_hr_MMM_daily, "../Gathered_Data/Dublin_Tallaght_SO2_hr_MMM_daily.csv")
#clear the workspace before the next station's section
rm(list=ls())
#WINETAVERN STREET MONTHLY ------------------------------------------------------------------------------------
#move from the Tallaght folder into the Winetavern St data folder
setwd('..')
setwd("WinetavernSt/")
#Monthly
#As
#Arsenic: import every workbook whose name contains "As" and stack them
As_files = dir(pattern = "As")
As_list = lapply(As_files, read_xlsx)
As = do.call(rbind, As_list)
#drop repeated header rows carrying the unit label
#NOTE(review): x[-grep(...),] empties the frame when there are zero matches
As = As[- grep("ng/m3", As$As),]
#build a monthly Date column; the +12 covers the year missing from the data,
#which is filtered back out just below
Dates = as.data.frame(seq(as.Date("2009/1/1"), by = "month", length.out = nrow(As) + 12))
colnames(Dates)[1] = "Date"
#there is no 2010 data, so drop the 2010 months from the sequence
Date1 = as.Date("2010-12-01")
Date2 = as.Date("2010-01-01")
Dates = filter(Dates, Dates$Date > Date1 | Dates$Date < Date2)
#attach the dates to the As rows (relies on the rows being in month order)
As = cbind(As, Dates)
#drop the old month column and put Date first
#NOTE(review): "X__1" is the legacy readxl name for an unnamed first column;
#newer readxl emits "...1" instead -- confirm which version produced these
As$X__1 = NULL
As = As[,c(2,1)]
#BaP
#Benzo(a)pyrene: import and stack the workbooks
BaP_files = dir(pattern = "BaP")
BaP_list = lapply(BaP_files, read_xlsx)
BaP = do.call(rbind, BaP_list)
#drop the header rows (column is named "B(a)P" in the sheets)
BaP = BaP[- grep("ng/m3", BaP$`B(a)P`),]
#same months as As, so reuse the Dates column built above
BaP = cbind(BaP, Dates)
#drop the old month column and put Date first
BaP$X__1 = NULL
BaP = BaP[,c(2,1)]
#Cadmium, Cd: import and clean the header rows
Cd_files = dir(pattern = "Cd")
Cd_list = lapply(Cd_files, read_xlsx)
Cd = do.call(rbind, Cd_list)
#drop the header rows
Cd = Cd[- grep("ng/m3", Cd$Cd),]
#same months as As, so reuse the Dates column built above
Cd = cbind(Cd, Dates)
#drop the old month column and put Date first
Cd$X__1 = NULL
Cd = Cd[,c(2,1)]
#Ni
#Nickel: import the workbooks and remove the header rows
Ni_files = dir(pattern = "Ni")
Ni_list = lapply(Ni_files, read_xlsx)
Ni = do.call(rbind, Ni_list)
Ni = Ni[- grep("ng/m3", Ni$Ni),]
#same months as As, so reuse the Dates column built above
Ni = cbind(Ni, Dates)
#drop the old month column and put Date first
Ni$X__1 = NULL
Ni = Ni[,c(2,1)]
#Pb
#Lead: import the workbooks and remove the header rows
Pb_files = dir(pattern = "Pb")
Pb_list = lapply(Pb_files, read_xlsx)
Pb = do.call(rbind, Pb_list)
Pb = Pb[- grep("ng/m3", Pb$Pb),]
#same months as As, so reuse the Dates column built above
Pb = cbind(Pb, Dates)
#drop the old month column and put Date first
Pb$X__1 = NULL
Pb = Pb[,c(2,1)]
#merge the monthly Winetavern St frames on the Date column
Dublin_WinetavernSt_As_BaP_Cd_Ni_Pb = Reduce(function(x, y) merge(x, y, all=TRUE), list(As,BaP,Cd,Ni,Pb))
#keep only year-month in the Date column
Dublin_WinetavernSt_As_BaP_Cd_Ni_Pb$Date = format(Dublin_WinetavernSt_As_BaP_Cd_Ni_Pb$Date, format="%Y-%m")
#save the gathered data
write_csv(Dublin_WinetavernSt_As_BaP_Cd_Ni_Pb, "../Gathered_Data/Dublin_WinetavernSt_As_BaP_Cd_Ni_Pb_monthly.csv")
#clear the workspace before the next section
rm(list=ls())
#WINETAVERN STREET DAILY ------------------------------------------------------------------------------------
#PM10
#import the xls PM10 files (dir() patterns are regexes; the leading "*" is
#inert and "xls$" does not match ".xlsx" names)
PM10_files = dir(pattern = "*PM10.*(.*)xls$")
PM10_list = lapply(PM10_files, read_xls)
PM10_xls = do.call(rbind, PM10_list)
#import the txt PM10 exports
PM10_files = dir(pattern = "*PM10.*(.*)txt$")
PM10_list = lapply(PM10_files, read_table2)
PM10_txt = do.call(rbind, PM10_list)
#rename the measurement column so the frames bind cleanly
colnames(PM10_txt)[2] = "PM10"
#dates appear in both day-month-year and year-month-day order; try both
PM10_txt$Date = parse_date_time(PM10_txt$Date, c('dmy', 'ymd'))
#import the xlsx PM10 files
PM10_files = dir(pattern = "*PM10.*(.*)xlsx$")
PM10_list = lapply(PM10_files, read_xlsx)
PM10_xlsx = do.call(rbind, PM10_list)
#stack every PM10 source into one frame
PM10 = rbind(PM10_xls, PM10_txt ,PM10_xlsx)
#sort chronologically
PM10 = PM10[order(as.Date(PM10$Date, format="%Y/%m/%d")),]
#drop repeated spreadsheet header rows carrying the unit label
#NOTE(review): x[-grep(...),] empties the frame when there are zero matches
PM10 = PM10[- grep("ug/m3", PM10$PM10),]
#truncate timestamps to calendar dates
PM10$Date = as.Date(PM10$Date,format='%Y-%m-%d %H')
#save the gathered data
write_csv(PM10, "../Gathered_Data/Dublin_WinetavernSt_PM10_daily.csv")
#clear the workspace before the next section
rm(list=ls())
#WINETAVERN STREET HOURLY ------------------------------------------------------------------------------------
#CO
#xls files
CO_files = dir(pattern = "*CO.*(.*)xls$")
CO_list = lapply(CO_files, read_xls)
#rename columns for binding. Changing columns labelled Hour to newer Time format used by the EPA. change CO columns to the same heading
CO_list <- lapply(CO_list, function(x) setNames(x, gsub("^Hour$", "Time", names(x))) )
CO_xls = do.call(rbind, CO_list)
#txt
CO_files = dir(pattern = "*CO*(.*)txt$")
CO_list = lapply(CO_files, read_table2)
CO_txt = do.call(rbind, CO_list)
colnames(CO_txt)[3] = "CO"
#change the date format for binding
CO_txt$Date = parse_date_time(CO_txt$Date, c('dmy', 'ymd'))
#xlsx ppm file import and convert to ugm3
CO_files = dir(pattern = "CO.*ppm")
CO_list = lapply(CO_files, read_xlsx)
CO_ppm = do.call(rbind, CO_list)
#clear old headers
CO_ppm = CO_ppm[- grep("ppm", CO_ppm$CO ),]
#convert ppm to mgm3. CO molecular weight is 28. convert from a tring to numerical values
CO_ppm$CO = as.numeric(CO_ppm$CO)
CO_ppm$CO = CO_ppm$CO * (28/22.41)
#xlsx ugm3 file import
CO_files = dir(pattern = "CO.*mgm3")
CO_list = lapply(CO_files, read_xlsx)
CO_mgm3 = do.call(rbind, CO_list)
#clear old headers
CO_mgm3 = CO_mgm3[- grep("mg/m3", CO_mgm3$CO ),]
#bind the CO datasets
CO_final = rbind(CO_xls, CO_txt, CO_ppm, CO_mgm3)
#clear old headers
CO_final = CO_final[- grep("mg/m3", CO_final$CO ),]
#Combine the date and the time for the different pollutants to help with graphing
CO_final$Date <- with(CO_final, as.POSIXct(paste(CO_final$Date, CO_final$Time), format="%Y-%m-%d %H"))
CO_final$Time = NULL
CO = CO_final
#plot(CO, type = "l")
#NOX
#NOX ppb files import
ppb_NOx = dir(pattern = "NOx.*ppb.*(.*)xls$")
NOx_list = lapply(ppb_NOx, read_xls)
#rename columns for binding
NOx_list <- lapply(NOx_list, function(x) setNames(x, gsub("^Hour$", "Time", names(x))) )
NOx_list <- lapply(NOx_list, function(x) setNames(x, gsub("^Nox$", "NOx", names(x))) )
NOx_list <- lapply(NOx_list, function(x) setNames(x, gsub("^NOX$", "NOx", names(x))) )
#bind the data
NOx_ppb = do.call(rbind, NOx_list)
#import ppb xlsx file
NOx_files = dir(pattern = "NOx.*ppb.*(.*)xlsx$")
NOx_list = lapply(NOx_files, read_xlsx)
NOx_ppb_xlsx = do.call(rbind, NOx_list)
#bind ppb dateframes
NOx_ppb = rbind(NOx_ppb, NOx_ppb_xlsx)
#clear rows with ppb written in them
NOx_ppb = NOx_ppb[- grep("ppb", NOx_ppb$NO2),]
#convert all the NO columns from strings to numerical values for calculations
NOx_ppb[, 3:5] <- sapply(NOx_ppb[, 3:5], as.numeric)
#is.numeric(NOx_ppb$NO2)
#convert from ppb to ugm3. molecular weight/22.41
NOx_ppb$NOx = NOx_ppb$NOx * (46/22.41)
NOx_ppb$NO = NOx_ppb$NO * (30/22.41)
NOx_ppb$NO2 = NOx_ppb$NO2 * (46/22.41)
#import ugm3 xls file
NOx_files = dir(pattern = "NOx.*ugm3(.*)xls$")
NOx_list = lapply(NOx_files, read_xls)
NOx_ugm3_xls = do.call(rbind, NOx_list)
#change hour column to Time
colnames(NOx_ugm3_xls)[2] = "Time"
#import ugm3 xlsx file
NOx_files = dir(pattern = "NOx.*ugm3(.*)xlsx$")
NOx_list = lapply(NOx_files, read_xlsx)
NOx_ugm3_xlsx = do.call(rbind, NOx_list)
#xls file has only 3 columns so need to use rbind fill in plyr library
library(plyr)
#bind all NOx data
NOx_final = rbind.fill(NOx_ppb, NOx_ugm3_xls, NOx_ugm3_xlsx)
#clear rows with ppb written in them
NOx_final = NOx_final[- grep("ug/m3", NOx_final$NO2),]
#Combine the date and the time for the different pollutants to help with graphing
NOx_final$Date <- with(NOx_final, as.POSIXct(paste(NOx_final$Date, NOx_final$Time), format="%Y-%m-%d %H"))
NOx_final$Time = NULL
#sort dataframe by time
NOx_final = NOx_final[order(as.Date(NOx_final$Date, format="%Y/%m/%d")),]
NOx = NOx_final
# --- SO2 (Winetavern St) ---------------------------------------------------
# Read the ppb SO2 files (xlsx) and stack them.
SO2_ppb_files <- dir(pattern = "SO2.*ppb|ppb.*SO2")
SO2_ppb_list <- lapply(SO2_ppb_files, read_xlsx)
SO2_ppb <- do.call(rbind, SO2_ppb_list)
# Convert ppb to ug/m3 for consistency with the other files
# (molar mass of SO2 = 64 g/mol, molar volume = 22.41 L/mol).
SO2_ppb$SO2 <- as.numeric(SO2_ppb$SO2)
SO2_ppb$SO2 <- SO2_ppb$SO2 * (64 / 22.41)
# Read the .xls SO2 files.
SO2_files <- dir(pattern = "SO2.*(.*)xls$")
SO2_list <- lapply(SO2_files, read_xls)
# Rename "Hour" to "Time" in each table so the columns line up for binding.
SO2_list <- lapply(SO2_list, function(x) setNames(x, gsub("^Hour$", "Time", names(x))))
SO2_xls <- do.call(rbind, SO2_list)
# Read the .txt SO2 files.
SO2_files <- dir(pattern = "SO2.*(.*)txt$")
SO2_list <- lapply(SO2_files, read_table2)
SO2_txt <- do.call(rbind, SO2_list)
# Rename the measurement column for binding.
colnames(SO2_txt)[3] <- "SO2"
# Normalise the mixed date formats (dmy or ymd) before binding.
SO2_txt$Date <- parse_date_time(SO2_txt$Date, c('dmy', 'ymd'))
SO2_txt$Date <- as.Date(SO2_txt$Date)
# Read the ug/m3 .xlsx SO2 files.
SO2_files <- dir(pattern = "SO2.*ugm3(.*)xlsx$")
SO2_list <- lapply(SO2_files, read_xlsx)
SO2_xlsx <- do.call(rbind, SO2_list)
# Bind all the SO2 sources together.
SO2_final <- rbind(SO2_ppb, SO2_xls, SO2_txt, SO2_xlsx)
# Sort chronologically.
SO2_final <- SO2_final[order(as.Date(SO2_final$Date, format = "%Y/%m/%d")), ]
# Combine the date and the hour into one POSIXct timestamp for graphing.
SO2_final$Date <- with(SO2_final, as.POSIXct(paste(SO2_final$Date, SO2_final$Time), format = "%Y-%m-%d %H"))
SO2_final$Time <- NULL
SO2 <- SO2_final
# Drop repeated header rows (rows where the SO2 column holds the unit
# string). Guard against an empty grep: x[-integer(0), ] would silently
# drop EVERY row.
hdr_rows <- grep("ug/m3", SO2$SO2)
if (length(hdr_rows) > 0) {
  SO2 <- SO2[-hdr_rows, ]
}
# Remove rows whose Date failed to parse; they cause issues when merging.
SO2 <- SO2 %>% drop_na(Date)
CO <- CO %>% drop_na(Date)
NOx <- NOx %>% drop_na(Date)
# --- Merge hourly CO / NOx / SO2 for Winetavern St -------------------------
Dublin_WinetavernSt_CO_NOx_SO2 <- Reduce(function(x, y) merge(x, y, all = TRUE), list(CO, NOx, SO2))
# Save the gathered hourly data.
write_csv(Dublin_WinetavernSt_CO_NOx_SO2, "../Gathered_Data/Dublin_WinetavernSt_CO_NOx_SO2_hr.csv")
# --- Convert to daily min/mean/max -----------------------------------------
# Pollutant columns arrive as character; coerce to numeric before aggregating.
for (pol in c("NOx", "NO", "NO2", "CO", "SO2")) {
  Dublin_WinetavernSt_CO_NOx_SO2[[pol]] <- as.numeric(Dublin_WinetavernSt_CO_NOx_SO2[[pol]])
}
# Aggregate every pollutant column to daily resolution with the given
# statistic and tag column names with a suffix (e.g. NOx -> NOx_Mean).
# na.rm = FALSE is deliberate: a day with any missing hour yields NA.
# Patterns are applied in the original script's order; "NO$" is anchored so
# it does not also rewrite NOx / NO2.
daily_stat <- function(df, fun, suffix) {
  out <- aggregate(df[names(df) != 'Date'],
                   list(hour = cut(df$Date, 'day')),
                   fun, na.rm = FALSE)
  for (pol in c("NOx", "NO$", "NO2", "CO", "SO2")) {
    names(out) <- gsub(pol, paste0(sub("\\$", "", pol), "_", suffix), names(out))
  }
  out
}
# Names avoid masking base mean/min/max, unlike the originals.
daily_mean <- daily_stat(Dublin_WinetavernSt_CO_NOx_SO2, mean, "Mean")
daily_min  <- daily_stat(Dublin_WinetavernSt_CO_NOx_SO2, min,  "Min")
daily_max  <- daily_stat(Dublin_WinetavernSt_CO_NOx_SO2, max,  "Max")
# Merge the three daily summaries on the shared day column.
Dublin_WinetavernSt_CO_NOx_SO2_hr_MMM_daily <- Reduce(function(x, y) merge(x, y, all = TRUE),
                                                      list(daily_mean, daily_min, daily_max))
# First column is the day bucket produced by cut(); rename and convert to Date.
colnames(Dublin_WinetavernSt_CO_NOx_SO2_hr_MMM_daily)[1] <- "Date"
Dublin_WinetavernSt_CO_NOx_SO2_hr_MMM_daily$Date <- as.Date(Dublin_WinetavernSt_CO_NOx_SO2_hr_MMM_daily$Date, format = '%Y-%m-%d %H')
# Save the converted daily data.
write_csv(Dublin_WinetavernSt_CO_NOx_SO2_hr_MMM_daily, "../Gathered_Data/Dublin_WinetavernSt_CO_NOx_SO2_hr_MMM_daily.csv")
# Clear the environment before processing the next station.
rm(list = ls())
#WOODQUAY DAILY --------------------------------------------------------------------------------
# Move from the Winetavern St folder into the Woodquay folder.
setwd('..')
setwd("Woodquay/")
# Import the PM10 .xls files and stack them.
PM10_files <- dir(pattern = "PM10.*xls$")
PM10_list <- lapply(PM10_files, read_xls)
PM10_xls <- do.call(rbind, PM10_list)
# Import the PM10 .txt files and stack them.
PM10_files <- dir(pattern = "PM10.*txt$")
PM10_list <- lapply(PM10_files, read_table2)
PM10_txt <- do.call(rbind, PM10_list)
# Parse the Date column of the txt files so it binds cleanly with the xls data.
PM10_txt$Date <- as.Date(PM10_txt$Date, format = "%d/%m/%Y")
# Rename the measurement column for binding.
colnames(PM10_txt)[2] <- "PM10"
# Bind all PM10 data.
PM10 <- rbind(PM10_xls, PM10_txt)
# Drop repeated header rows (rows holding the unit string "ug/m3").
# Guard against an empty grep: x[-integer(0), ] would drop EVERY row.
hdr_rows <- grep("ug/m3", PM10$PM10)
if (length(hdr_rows) > 0) {
  PM10 <- PM10[-hdr_rows, ]
}
# Save the gathered data.
write_csv(PM10, "../Gathered_Data/Dublin_Woodquay_PM10_daily.csv")
#WOODQUAY HOURLY --------------------------------------------------------------------------------
# Import the Benzene file for this region.
Dublin_Woodquay_Benzene_2001 <- read_excel("Dublin_Woodquay_Benzene_2001.xls")
# Drop repeated header rows, with the same empty-grep guard as above.
hdr_rows <- grep("ug/m3", Dublin_Woodquay_Benzene_2001$Benzene)
if (length(hdr_rows) > 0) {
  Dublin_Woodquay_Benzene_2001 <- Dublin_Woodquay_Benzene_2001[-hdr_rows, ]
}
# Not enough data to save and use.
# Clear the environment.
rm(list = ls())
|
# Naive Bayes classification on the Social Network Ads data.
# Import data; keep only Age, EstimatedSalary, Purchased (columns 3-5).
dataset <- read.csv('Social_Network_Ads.csv')
dataset <- dataset[3:5]
# Encode the response as a factor.
dataset$Purchased <- factor(dataset$Purchased, levels = c(0, 1))
# Split the data into training and test sets (75/25, stratified on Purchased).
library(caTools)
set.seed(123)
split <- sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set <- subset(dataset, split == TRUE)
test_set <- subset(dataset, split == FALSE)
# Feature scaling.
# NOTE(review): the test set is scaled with its own mean/sd rather than the
# training set's -- confirm this is intended before reusing the model.
training_set[-3] <- scale(training_set[-3])
test_set[-3] <- scale(test_set[-3])
# Fit the Naive Bayes classifier on the two scaled features.
library(e1071)
classifier <- naiveBayes(x = training_set[-3],
                         y = training_set$Purchased)
# Predict on the test set.
y_pred <- predict(classifier, newdata = test_set[-3])
# Confusion matrix: true labels vs predictions.
cm <- table(test_set[, 3], y_pred)
# Visualising the Training set results on a fine background grid.
library(ElemStatLearn)
set <- training_set
X1 <- seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 <- seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set <- expand.grid(X1, X2)
colnames(grid_set) <- c('Age', 'EstimatedSalary')
y_grid <- predict(classifier, newdata = grid_set)
# Title fixed: this model is Naive Bayes, not SVM.
plot(set[, -3],
     main = 'Naive Bayes (Training set)',
     xlab = 'Age', ylab = 'Estimated Salary',
     xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
# Visualising the Test set results.
set <- test_set
X1 <- seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 <- seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set <- expand.grid(X1, X2)
colnames(grid_set) <- c('Age', 'EstimatedSalary')
y_grid <- predict(classifier, newdata = grid_set)
plot(set[, -3], main = 'Naive Bayes (Test set)',
     xlab = 'Age', ylab = 'Estimated Salary',
     xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
| /NB.R | no_license | abhimanyudwivedi/weirdMLinR | R | false | false | 2,080 | r | #import data
# Naive Bayes on the Social Network Ads data (duplicate copy of the script
# above, preserved as-is from the dataset dump).
# Keep only Age, EstimatedSalary, Purchased (columns 3-5).
dataset = read.csv('Social_Network_Ads.csv')
dataset = dataset[3:5]
# Encode the response as a factor.
dataset$Purchased = factor(dataset$Purchased, levels = c(0,1))
# Split data into train and test sets (75/25, stratified on Purchased).
library(caTools)
set.seed(123)
split = sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature scaling.
# NOTE(review): test set is scaled with its own mean/sd, not the training
# set's -- confirm intended.
training_set[-3] = scale(training_set[-3])
test_set[-3] = scale(test_set[-3])
# Fit the Naive Bayes classifier.
library(e1071)
classifier = naiveBayes(x = training_set[-3],
y = training_set$Purchased)
# Predict on the test set.
y_pred = predict(classifier, newdata = test_set[-3])
# Confusion matrix: true labels vs predictions.
cm = table(test_set[,3], y_pred)
# Visualising the Training set results
library(ElemStatLearn)
set = training_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = predict(classifier, newdata = grid_set)
# NOTE(review): the title says 'SVM' but the fitted model is Naive Bayes.
plot(set[, -3],
main = 'SVM (Training set)',
xlab = 'Age', ylab = 'Estimated Salary',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
# Visualising the Test set results
library(ElemStatLearn)
set = test_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = predict(classifier, newdata = grid_set)
# NOTE(review): title mislabeled 'SVM' here too.
plot(set[, -3], main = 'SVM (Test set)',
xlab = 'Age', ylab = 'Estimated Salary',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
|
# Plot cumulative weekly fork counts for eight GitHub projects and save each
# curve as an EPS figure. The original script repeated the same nine-line
# stanza eight times; here the shared logic lives in two small helpers.
base_dir <- "~/Dropbox/Documents/ifpa/2016/writing_papers/ESEJb"
# Read one project's fork CSV (expects a 'sum' column of weekly cumulative forks).
read_forks <- function(project) {
  read.csv(file.path(base_dir, "data", "forks", paste0(project, ".csv")), header = TRUE)
}
plotly <- read_forks("plotly.js")
atom <- read_forks("atom")
swift <- read_forks("swift")
hhvm <- read_forks("hhvm")
storm <- read_forks("storm")
hubot <- read_forks("hubot")
roslyn <- read_forks("roslyn")
zulip <- read_forks("zulip")
# Draw one project's fork curve (in thousands) into <figs>/forks-<fig_name>.eps.
plot_forks <- function(dat, fig_name, main_title) {
  setEPS()
  postscript(file.path(base_dir, "figs", paste0("forks-", fig_name, ".eps")),
             width = 5, height = 4.3)
  on.exit(dev.off(), add = TRUE)  # close the device even if plotting fails
  plot(dat$sum / 1000, type = "l", xlab = "Weeks", ylab = "Sum",
       cex.lab = 1.2, lwd = 2, axes = FALSE)
  axis(1)
  # Label the y axis in thousands, e.g. "12K".
  pts <- pretty(dat$sum / 1000)
  axis(2, at = pts, labels = paste0(pts, "K"))
  title(main_title)
  box()
}
# Same output order and titles as the original script.
plot_forks(plotly, "plotly", "Plotly")
plot_forks(atom, "atom", "atom")
plot_forks(swift, "swift", "Swift")
plot_forks(hhvm, "hhvm", "HHVM")
plot_forks(storm, "storm", "Storm")
plot_forks(hubot, "hubot", "hubot")
plot_forks(zulip, "zulip", "Zulip")
plot_forks(roslyn, "roslyn", "Roslyn")
| /data/forks/forks.R | no_license | gustavopinto/migration-to-oss | R | false | false | 3,522 | r | plotly <- read.csv("~/Dropbox/Documents/ifpa/2016/writing_papers/ESEJb/data/forks/plotly.js.csv", header=TRUE)
# Duplicate copy of the forks plotting script (the plotly read.csv line of
# this copy was fused into the dataset metadata row above). Each stanza:
# open an EPS device, plot the weekly cumulative fork count in thousands,
# label the y axis as "<n>K", title, close the device.
atom <- read.csv("~/Dropbox/Documents/ifpa/2016/writing_papers/ESEJb/data/forks/atom.csv", header=TRUE)
swift <- read.csv("~/Dropbox/Documents/ifpa/2016/writing_papers/ESEJb/data/forks/swift.csv", header=TRUE)
hhvm <- read.csv("~/Dropbox/Documents/ifpa/2016/writing_papers/ESEJb/data/forks/hhvm.csv", header=TRUE)
storm <- read.csv("~/Dropbox/Documents/ifpa/2016/writing_papers/ESEJb/data/forks/storm.csv", header=TRUE)
hubot <- read.csv("~/Dropbox/Documents/ifpa/2016/writing_papers/ESEJb/data/forks/hubot.csv", header=TRUE)
roslyn <- read.csv("~/Dropbox/Documents/ifpa/2016/writing_papers/ESEJb/data/forks/roslyn.csv", header=TRUE)
zulip <- read.csv("~/Dropbox/Documents/ifpa/2016/writing_papers/ESEJb/data/forks/zulip.csv", header=TRUE)
# Plotly
setEPS()
postscript("~/Dropbox/Documents/ifpa/2016/writing_papers/ESEJb/figs/forks-plotly.eps", width=5,height=4.3)
plot(plotly$sum/ 1000, type="l", xlab="Weeks", ylab="Sum", cex.lab=1.2, lwd=2, axes = FALSE)
axis(1)
pts <- pretty(plotly$sum / 1000)
axis(2, at = pts, labels = paste(pts, "K", sep = ""))
title("Plotly")
box()
dev.off()
# Atom
setEPS()
postscript("~/Dropbox/Documents/ifpa/2016/writing_papers/ESEJb/figs/forks-atom.eps", width=5,height=4.3)
plot(atom$sum/ 1000, type="l", xlab="Weeks", ylab="Sum", cex.lab=1.2, lwd=2, axes = FALSE)
axis(1)
pts <- pretty(atom$sum / 1000)
axis(2, at = pts, labels = paste(pts, "K", sep = ""))
title("atom")
box()
dev.off()
# Swift
setEPS()
postscript("~/Dropbox/Documents/ifpa/2016/writing_papers/ESEJb/figs/forks-swift.eps", width=5,height=4.3)
plot(swift$sum/ 1000, type="l", xlab="Weeks", ylab="Sum", cex.lab=1.2, lwd=2, axes = FALSE)
axis(1)
pts <- pretty(swift$sum / 1000)
axis(2, at = pts, labels = paste(pts, "K", sep = ""))
title("Swift")
box()
dev.off()
# HHVM
setEPS()
postscript("~/Dropbox/Documents/ifpa/2016/writing_papers/ESEJb/figs/forks-hhvm.eps", width=5,height=4.3)
plot(hhvm$sum/ 1000, type="l", xlab="Weeks", ylab="Sum", cex.lab=1.2, lwd=2, axes = FALSE)
axis(1)
pts <- pretty(hhvm$sum / 1000)
axis(2, at = pts, labels = paste(pts, "K", sep = ""))
title("HHVM")
box()
dev.off()
# Storm
setEPS()
postscript("~/Dropbox/Documents/ifpa/2016/writing_papers/ESEJb/figs/forks-storm.eps", width=5,height=4.3)
plot(storm$sum/ 1000, type="l", xlab="Weeks", ylab="Sum", cex.lab=1.2, lwd=2, axes = FALSE)
axis(1)
pts <- pretty(storm$sum / 1000)
axis(2, at = pts, labels = paste(pts, "K", sep = ""))
title("Storm")
box()
dev.off()
# Hubot
setEPS()
postscript("~/Dropbox/Documents/ifpa/2016/writing_papers/ESEJb/figs/forks-hubot.eps", width=5,height=4.3)
plot(hubot$sum/ 1000, type="l", xlab="Weeks", ylab="Sum", cex.lab=1.2, lwd=2, axes = FALSE)
axis(1)
pts <- pretty(hubot$sum / 1000)
axis(2, at = pts, labels = paste(pts, "K", sep = ""))
title("hubot")
box()
dev.off()
# Zulip
setEPS()
postscript("~/Dropbox/Documents/ifpa/2016/writing_papers/ESEJb/figs/forks-zulip.eps", width=5,height=4.3)
plot(zulip$sum/ 1000, type="l", xlab="Weeks", ylab="Sum", cex.lab=1.2, lwd=2, axes = FALSE)
axis(1)
pts <- pretty(zulip$sum / 1000)
axis(2, at = pts, labels = paste(pts, "K", sep = ""))
title("Zulip")
box()
dev.off()
# Roslyn
setEPS()
postscript("~/Dropbox/Documents/ifpa/2016/writing_papers/ESEJb/figs/forks-roslyn.eps", width=5,height=4.3)
plot(roslyn$sum/ 1000, type="l", xlab="Weeks", ylab="Sum", cex.lab=1.2, lwd=2, axes = FALSE)
axis(1)
pts <- pretty(roslyn$sum / 1000)
axis(2, at = pts, labels = paste(pts, "K", sep = ""))
title("Roslyn")
box()
dev.off()
|
# Auto-generated fuzz-test fixture: call breakfast's internal setBitNumber
# with one fixed integer input.
testlist <- list(n = 825556992L)
# do.call unpacks the list elements as named arguments to the function.
result <- do.call(breakfast:::setBitNumber,testlist)
str(result) | /breakfast/inst/testfiles/setBitNumber/libFuzzer_setBitNumber/setBitNumber_valgrind_files/1609960494-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 97 | r | testlist <- list(n = 825556992L)
# Duplicate fuzz-fixture call (dataset artifact; the testlist line of this
# copy is fused into the metadata row above).
result <- do.call(breakfast:::setBitNumber,testlist)
str(result) |
\name{Socatt}
\alias{Socatt}
\docType{data}
\title{Social Attitudes Survey}
\description{
These data come from the British Social Attitudes (BSA) Survey started
in 1983. The eligible persons were all adults aged 18 or over living
in private households in Britain. The data consist of completed
results of 264 respondents out of 410.
}
\usage{data(Socatt)}
\format{
A data frame with 1056 observations on the following 9 variables.
\describe{
\item{district}{District ID - a factor}
\item{respond}{Respondent code (within district) - a factor}
\item{year}{A factor with levels \code{1983}, \code{1984},
\code{1985}, and \code{1986}}
\item{numpos}{An ordered factor giving the number of positive answers to
seven questions.}
\item{party}{Political party chosen - a factor. Levels are
\code{conservative}, \code{labour}, \code{Lib/SDP/Alliance},
\code{others}, and \code{none}.}
\item{class}{Self assessed social class - a factor. Levels are
\code{middle}, \code{upper working}, and \code{lower working}.}
\item{gender}{Respondent's sex. (1=male, 2=female)}
\item{age}{Age in years}
\item{religion}{Religion - a factor. Levels are \code{Roman
Catholic}, \code{Protestant/Church of England}, \code{others},
and \code{none}.}
}
}
\details{
These data are provided as an example of multilevel data with a
multinomial response.
}
\source{
\url{http://www.bristol.ac.uk/cmm/learning/mmsoftware/data-rev.html}
}
\references{
McGrath, K. and Waterton, J. (1986). \emph{British Social Attitudes
1983-1986 panel survey.} London, Social and Community Planning
Research.
}
\examples{
str(Socatt)
summary(Socatt)
}
\keyword{datasets}
| /man/Socatt.Rd | no_license | cran/mlmRev | R | false | false | 1,777 | rd | \name{Socatt}
\alias{Socatt}
\docType{data}
\title{Social Attitudes Survey}
\description{
These data come from the British Social Attitudes (BSA) Survey started
in 1983. The eligible persons were all adults aged 18 or over living
in private households in Britain. The data consist of completed
results of 264 respondents out of 410.
}
\usage{data(Socatt)}
\format{
A data frame with 1056 observations on the following 9 variables.
\describe{
\item{district}{District ID - a factor}
\item{respond}{Respondent code (within district) - a factor}
\item{year}{A factor with levels \code{1983}, \code{1984},
\code{1985}, and \code{1986}}
\item{numpos}{An ordered factor giving the number of positive answers to
seven questions.}
\item{party}{Political party chosen - a factor. Levels are
\code{conservative}, \code{labour}, \code{Lib/SDP/Alliance},
\code{others}, and \code{none}.}
\item{class}{Self assessed social class - a factor. Levels are
\code{middle}, \code{upper working}, and \code{lower working}.}
\item{gender}{Respondent's sex. (1=male, 2=female)}
\item{age}{Age in years}
\item{religion}{Religion - a factor. Levels are \code{Roman
Catholic}, \code{Protestant/Church of England}, \code{others},
and \code{none}.}
}
}
\details{
These data are provided as an example of multilevel data with a
multinomial response.
}
\source{
\url{http://www.bristol.ac.uk/cmm/learning/mmsoftware/data-rev.html}
}
\references{
McGrath, K. and Waterton, J. (1986). \emph{British Social Attitudes
1983-1986 panel survey.} London, Social and Community Planning
Research.
}
\examples{
str(Socatt)
summary(Socatt)
}
\keyword{datasets}
|
#' @include RcppExports.R marxan-internal.R misc.R generics.R MarxanOpts.R MarxanData.R MarxanUnsolved.R MarxanResults.R MarxanSolved.R
NULL
#' General Marxan Function
#'
#' This is a general function to create Marxan objects from scratch and run the Marxan program to generate solutions.
#'
#' @param x generic argument.
#' @param ... arguments are passed to MarxanData and MarxanOpts functions.
#' @param path 'character' file path to Marxan input parameters file.
#' @param polygons 'SpatialPolyogns' object representing planning units.
#' @param rasters 'Raster' object with species distribution data.
#' @param pu "data.frame" planning unit data; with "integer" 'id', "numeric" 'cost', "integer" 'status' columns.
#' @param species "data.frame" with species data; with "integer" 'id', "numeric" 'target', "numeric" 'spf', and "character" 'name' columns.
#' @param puvspecies "data.frame" with data on species density in each planning unit, with "integer" 'species', "integer" 'pu', and "numeric" 'target' columns. This "data.frame" is sorted in order of 'pu' column.
#' @param boundary "data.frame" with data on the shared boundary length of planning; with "integer" 'id1', "integer" 'id2', and "numeric" 'amount' columns.
#' @param solve "logical" should the problem be solved using Marxan?
#' @export
#' @note See the package vignette for help.
#' @return "MarxanSolved" or "MarxanUnsolved"
#' @seealso \code{\link{MarxanOpts}}, \code{\link{MarxanData}}, \code{\link{MarxanResults}}, \code{\link{MarxanUnsolved}}
# S3 generic: dispatches on class(x) to marxan.character,
# marxan.SpatialPolygons, or marxan.data.frame below.
marxan<-function(x, ...) UseMethod('marxan')
#' @rdname marxan
#' @inheritParams marxan
#' @export
marxan.character<-function(path, solve=TRUE) {
	# If a Marxan summary output already exists, load the solved run;
	# otherwise read the unsolved problem and (optionally) solve it.
	# NOTE(review): basename(path) keeps only the file name, so this looks
	# in a directory named after the file -- presumably dirname(path) was
	# intended to check next to the input file; confirm before changing.
	if(file.exists(file.path(basename(path), 'output_sum.csv'))) {
		return(read.MarxanSolved(path))
	} else {
		x<-read.MarxanUnsolved(path)
		if (solve)
			x<-solve.MarxanUnsolved(x)
		return(x)
	}
}
#' @rdname marxan
#' @inheritParams marxan
#' @export
marxan.SpatialPolygons<-function(polygons, rasters, ..., solve=TRUE) {
	# Build the Marxan problem from spatial inputs: run options come from
	# `...` (unrecognised arguments are ignored), data from the
	# planning-unit polygons and species rasters.
	opts <- MarxanOpts(..., ignore.extra=TRUE)
	dat <- format.MarxanData(polygons=polygons, rasters=rasters, ...)
	problem <- MarxanUnsolved(opts, dat)
	# Optionally run Marxan straight away.
	if (solve) {
		problem <- solve.MarxanUnsolved(problem)
	}
	problem
}
#' @rdname marxan
#' @inheritParams marxan
#' @export
marxan.data.frame<-function(pu,species,puvspecies,boundary=NULL, ..., solve=TRUE) {
	# Assemble the Marxan problem from raw input tables; run options are
	# parsed from `...` with unrecognised arguments ignored.
	opts <- MarxanOpts(..., ignore.extra=TRUE)
	dat <- MarxanData(pu=pu, species=species, puvspecies=puvspecies, boundary=boundary, ...)
	problem <- MarxanUnsolved(opts, dat)
	# Optionally run Marxan straight away.
	if (solve) {
		problem <- solve.MarxanUnsolved(problem)
	}
	problem
}
| /R/marxan.R | no_license | jeffreyhanson/marxan | R | false | false | 2,514 | r | #' @include RcppExports.R marxan-internal.R misc.R generics.R MarxanOpts.R MarxanData.R MarxanUnsolved.R MarxanResults.R MarxanSolved.R
NULL
#' General Marxan Function
#'
#' This is a general function to create Marxan objects from scratch and run the Marxan program to generate solutions.
#'
#' @param x generic argument.
#' @param ... arguments are passed to MarxanData and MarxanOpts functions.
#' @param path 'character' file path to Marxan input parameters file.
#' @param polygons 'SpatialPolyogns' object representing planning units.
#' @param rasters 'Raster' object with species distribution data.
#' @param pu "data.frame" planning unit data; with "integer" 'id', "numeric" 'cost', "integer" 'status' columns.
#' @param species "data.frame" with species data; with "integer" 'id', "numeric" 'target', "numeric" 'spf', and "character" 'name' columns.
#' @param puvspecies "data.frame" with data on species density in each planning unit, with "integer" 'species', "integer" 'pu', and "numeric" 'target' columns. This "data.frame" is sorted in order of 'pu' column.
#' @param boundary "data.frame" with data on the shared boundary length of planning; with "integer" 'id1', "integer" 'id2', and "numeric" 'amount' columns.
#' @param solve "logical" should the problem be solved using Marxan?
#' @export
#' @note See the package vignette for help.
#' @return "MarxanSolved" or "MarxanUnsolved"
#' @seealso \code{\link{MarxanOpts}}, \code{\link{MarxanData}}, \code{\link{MarxanResults}}, \code{\link{MarxanUnsolved}}
# S3 generic (duplicate copy): dispatches on class(x) to the methods below.
marxan<-function(x, ...) UseMethod('marxan')
#' @rdname marxan
#' @inheritParams marxan
#' @export
marxan.character<-function(path, solve=TRUE) {
	# Load an already-solved run if the summary output exists; otherwise
	# read the unsolved problem and (optionally) solve it.
	# NOTE(review): basename(path) keeps only the file name -- presumably
	# dirname(path) was intended; confirm.
	if(file.exists(file.path(basename(path), 'output_sum.csv'))) {
		return(read.MarxanSolved(path))
	} else {
		x<-read.MarxanUnsolved(path)
		if (solve)
			x<-solve.MarxanUnsolved(x)
		return(x)
	}
}
#' @rdname marxan
#' @inheritParams marxan
#' @export
marxan.SpatialPolygons<-function(polygons, rasters, ..., solve=TRUE) {
	# Build the problem: options from `...` (extras ignored), data formatted
	# from the planning-unit polygons and species rasters.
	x<-MarxanUnsolved(
		MarxanOpts(..., ignore.extra=TRUE),
		format.MarxanData(polygons=polygons, rasters=rasters, ...)
	)
	# Optionally run Marxan immediately.
	if (solve)
		x<-solve.MarxanUnsolved(x)
	return(x)
}
#' @rdname marxan
#' @inheritParams marxan
#' @export
marxan.data.frame<-function(pu,species,puvspecies,boundary=NULL, ..., solve=TRUE) {
	# Assemble the problem from raw input tables; options parsed from `...`
	# with unrecognised arguments ignored.
	x<-MarxanUnsolved(
		MarxanOpts(..., ignore.extra=TRUE),
		MarxanData(pu=pu, species=species, puvspecies=puvspecies, boundary=boundary, ...)
	)
	# Optionally run Marxan immediately.
	if (solve)
		x<-solve.MarxanUnsolved(x)
	return(x)
}
|
# Plotting networks from mdine fit using igraph package
library(igraph)
library(phyloseq)
library(mdine)
# BE SURE TO SET YOUR WORKING DIRECTORY:
# setwd("~")
source("sim_eval_functions.R")
source('plot_networks.R')
load("cleaned_data_ileum.RData")
#Family analysis -- choosing top (named) families
# Drop the unnamed family column ("f__") before ranking by mean abundance.
n.fam = 15
named.fam = counts_ileum$family[,colnames(counts_ileum$family)!="f__"]
top.fam = names(head(sort(colMeans(named.fam),decreasing = TRUE), n.fam))
# Everything outside the top families is pooled into one reference column.
not.top.fam = colnames(counts_ileum$family)[!colnames(counts_ileum$family) %in% top.fam]
counts.top.fam = cbind(counts_ileum$family[,top.fam], rowSums(counts_ileum$family[,not.top.fam]))
colnames(counts.top.fam) = c(top.fam, "ref")
#Recoding diagnosis/antibiotics to drop zero categories
dat.ileum$diagnosis = factor(dat.ileum$diagnosis, levels=c("no", "CD"))
dat.ileum$antibiotics = factor(dat.ileum$antibiotics, levels=c("false", "true"))
covars = model.matrix(~diagnosis+age+sex+antibiotics, data=dat.ileum)
# Align count rows with the design-matrix rows (model.matrix drops NAs).
counts.top.fam = counts.top.fam[rownames(covars),]
# Running mdine; quant sets the credible-interval probabilities.
ci.probs <- c(0.025, 0.975) #c(0.05, 0.95)
mn.family = mdine(counts.top.fam, covars, covars[,"diagnosisCD"], quant=ci.probs)
# Calculating predicted proportions for each individual
predict0 = exp(covars[covars[,"diagnosisCD"]==0,]%*%mn.family$post_mean$beta)
predict1 = exp(covars[covars[,"diagnosisCD"]==1,]%*%mn.family$post_mean$beta)
# The +1 accounts for the reference category in the multinomial-logit scale.
comp0 = predict0/(rowSums(predict0)+1)
comp1 = predict1/(rowSums(predict1)+1)
# Averaging proportions over all subjects to scale size of nodes
mean_comp0 = colMeans(comp0)
mean_comp1 = colMeans(comp1)
#Reorder based on phyla so they appear close together in network
taxa_table = tax_table(phyloseq_family)
fam_ind = match(colnames(counts.top.fam)[-NCOL(counts.top.fam)], taxa_table[,"Rank5"])
phyla_for_fam = taxa_table[fam_ind,"Rank2"]
# NOTE(review): `reorder` is computed but not used below -- confirm whether
# the reordering step was meant to be applied. It also shadows stats::reorder.
reorder = order(phyla_for_fam)
# Strip the "p__" prefix from phylum labels.
phyla_names = substr(as.vector(phyla_for_fam), 4, nchar(phyla_for_fam))
cc_network = plotNetworks(counts.top.fam, mn.family$post_mean$invsigma0, mn.family$post_mean$invsigma1,
ci0=mn.family$ci$invsigma0, ci1=mn.family$ci$invsigma1,
lay=layout.mds,
lab0="Controls", lab1="Crohn's cases",
vertex.size0 = 3.5*(log(mean_comp0)-min(log(mean_comp0))+0.1),
vertex.size1 = 3.5*(log(mean_comp1)-min(log(mean_comp1))+0.1),
col = as.numeric(factor(phyla_for_fam)), seed=3984, vertex.label.cex = 1,
phyla_names=phyla_names, side.legend=TRUE, cex.main=2)
# Network differences
cor_diff = abs(cov2cor(mn.family$post_mean$invsigma1)) - abs(cov2cor(mn.family$post_mean$invsigma0))
adj_diff = ci_to_adj(mn.family$ci$invsigma_diff[[1]],
mn.family$ci$invsigma_diff[[2]])
sig.diffs = adj_diff*cor_diff
diff_graph = graph.adjacency(sig.diffs, mode="undirected", weighted=TRUE, diag = FALSE)
E(diff_graph)$color[E(diff_graph)$weight>0] = "black"
E(diff_graph)$color[E(diff_graph)$weight<=0] = "black"
E(diff_graph)$lty[E(diff_graph)$weight>0] = 1
E(diff_graph)$lty[E(diff_graph)$weight<=0] = 2
scale_line_width = 30
labs = substr(colnames(counts.top.fam)[-NCOL(counts.top.fam)], 4, nchar(colnames(counts.top.fam)[-NCOL(counts.top.fam)]))
# Colour to show abundance differences
abund_diff_col = ifelse(mean_comp1-mean_comp0>0, "green", "red")
# Plotting differences in networks
layout(matrix(c(1,2,3,3), nrow=2, byrow=TRUE), heights=c(4, 1, 1), widths=c(4,1,2))
par(mai=rep(0.2, 4))
plot.igraph(diff_graph, layout=cc_network, vertex.label.cex=1,
edge.width=abs(E(diff_graph)$weight)*scale_line_width, main="Network differences (CD minus control)",
vertex.color=abund_diff_col, vertex.size=15,#8*(abs(log(mean_comp1)-log(mean_comp0))),
vertex.shape="circle",
vertex.label=labs, vertex.label.color="black")
par(mai=c(0,0,0,0))
plot.new()
legend("center", legend=c("Higher in CD", "Lower in CD"), title="Family abundance",
col="black", pt.bg=c("green", "red"), pch=21, pt.cex = 3, box.col = "white", y.intersp = 2)
plot.new()
legend("center", legend=c("CD abs. association stronger", "Control abs. association stronger"),
lty=c(1,2), col=c("black", "black"), ncol=2, cex=1, lwd = 2, box.col = "white")
| /plot_mdine_networks.R | no_license | kevinmcgregor/crohns_analysis | R | false | false | 4,318 | r | # Plotting networks from mdine fit using igraph pacakge
library(igraph)
library(phyloseq)
library(mdine)
# BE SURE TO SET YOUR WORKING DIRECTORY:
# setwd("~")
source("sim_eval_functions.R")
source('plot_networks.R')
load("cleaned_data_ileum.RData")
#Family analysis -- choosing top (named) families
n.fam = 15
named.fam = counts_ileum$family[,colnames(counts_ileum$family)!="f__"]
top.fam = names(head(sort(colMeans(named.fam),decreasing = TRUE), n.fam))
not.top.fam = colnames(counts_ileum$family)[!colnames(counts_ileum$family) %in% top.fam]
counts.top.fam = cbind(counts_ileum$family[,top.fam], rowSums(counts_ileum$family[,not.top.fam]))
colnames(counts.top.fam) = c(top.fam, "ref")
#Recoding diagnosis/antibiotics to drop zero categories
dat.ileum$diagnosis = factor(dat.ileum$diagnosis, levels=c("no", "CD"))
dat.ileum$antibiotics = factor(dat.ileum$antibiotics, levels=c("false", "true"))
covars = model.matrix(~diagnosis+age+sex+antibiotics, data=dat.ileum)
counts.top.fam = counts.top.fam[rownames(covars),]
# Running mdine
# Credible-interval probabilities used for the precision-matrix estimates.
ci.probs <- c(0.025, 0.975) #c(0.05, 0.95)
# Fit mdine with diagnosisCD as the group variable (two precision matrices:
# invsigma0 for controls, invsigma1 for CD cases).
mn.family = mdine(counts.top.fam, covars, covars[,"diagnosisCD"], quant=ci.probs)
# Calculating predicted proportions for each individual, separately for
# controls (0) and CD cases (1), from the posterior-mean coefficients.
predict0 = exp(covars[covars[,"diagnosisCD"]==0,]%*%mn.family$post_mean$beta)
predict1 = exp(covars[covars[,"diagnosisCD"]==1,]%*%mn.family$post_mean$beta)
# The "+1" in the denominator accounts for the reference category.
comp0 = predict0/(rowSums(predict0)+1)
comp1 = predict1/(rowSums(predict1)+1)
# Averaging proportions over all subjects to scale size of nodes
mean_comp0 = colMeans(comp0)
mean_comp1 = colMeans(comp1)
#Reorder based on phyla so they appear close together in network
taxa_table = tax_table(phyloseq_family)
# Match each modelled family (excluding the "ref" column) to its taxonomy row.
fam_ind = match(colnames(counts.top.fam)[-NCOL(counts.top.fam)], taxa_table[,"Rank5"])
phyla_for_fam = taxa_table[fam_ind,"Rank2"]
# NOTE(review): `reorder` is computed but not used below (and shadows
# base::reorder) -- confirm whether it was meant to reorder the columns.
reorder = order(phyla_for_fam)
# Strip the 3-character taxonomy prefix (e.g. "p__") from the phylum names.
phyla_names = substr(as.vector(phyla_for_fam), 4, nchar(phyla_for_fam))
# Plotting networks
# plotNetworks() (from plot_networks.R) draws the control and CD networks
# side by side and returns the layout, reused below for the difference graph.
cc_network = plotNetworks(counts.top.fam, mn.family$post_mean$invsigma0, mn.family$post_mean$invsigma1,
ci0=mn.family$ci$invsigma0, ci1=mn.family$ci$invsigma1,
lay=layout.mds,
lab0="Controls", lab1="Crohn's cases",
vertex.size0 = 3.5*(log(mean_comp0)-min(log(mean_comp0))+0.1),
vertex.size1 = 3.5*(log(mean_comp1)-min(log(mean_comp1))+0.1),
col = as.numeric(factor(phyla_for_fam)), seed=3984, vertex.label.cex = 1,
phyla_names=phyla_names, side.legend=TRUE, cex.main=2)
# Network differences
# Difference in absolute partial correlations: positive means the CD
# association is stronger in magnitude than the control association.
cor_diff = abs(cov2cor(mn.family$post_mean$invsigma1)) - abs(cov2cor(mn.family$post_mean$invsigma0))
# Keep only edges whose credible interval for the difference excludes zero.
adj_diff = ci_to_adj(mn.family$ci$invsigma_diff[[1]],
mn.family$ci$invsigma_diff[[2]])
sig.diffs = adj_diff*cor_diff
diff_graph = graph.adjacency(sig.diffs, mode="undirected", weighted=TRUE, diag = FALSE)
# Both signs are drawn in black; the sign is distinguished by line type
# (solid = CD stronger, dashed = control stronger), matching the legend below.
E(diff_graph)$color[E(diff_graph)$weight>0] = "black"
E(diff_graph)$color[E(diff_graph)$weight<=0] = "black"
E(diff_graph)$lty[E(diff_graph)$weight>0] = 1
E(diff_graph)$lty[E(diff_graph)$weight<=0] = 2
scale_line_width = 30
# Node labels: family names with the 3-character taxonomy prefix stripped.
labs = substr(colnames(counts.top.fam)[-NCOL(counts.top.fam)], 4, nchar(colnames(counts.top.fam)[-NCOL(counts.top.fam)]))
# Colour to show abundance differences
abund_diff_col = ifelse(mean_comp1-mean_comp0>0, "green", "red")
# Plotting differences in networks
# 2x2 layout: the graph spans the top row; the two legends share the bottom.
layout(matrix(c(1,2,3,3), nrow=2, byrow=TRUE), heights=c(4, 1, 1), widths=c(4,1,2))
par(mai=rep(0.2, 4))
# Reuse the layout returned by plotNetworks() so nodes match the earlier plots.
plot.igraph(diff_graph, layout=cc_network, vertex.label.cex=1,
edge.width=abs(E(diff_graph)$weight)*scale_line_width, main="Network differences (CD minus control)",
vertex.color=abund_diff_col, vertex.size=15,#8*(abs(log(mean_comp1)-log(mean_comp0))),
vertex.shape="circle",
vertex.label=labs, vertex.label.color="black")
par(mai=c(0,0,0,0))
# Legend 1: node colour encodes abundance direction (green up / red down in CD).
plot.new()
legend("center", legend=c("Higher in CD", "Lower in CD"), title="Family abundance",
col="black", pt.bg=c("green", "red"), pch=21, pt.cex = 3, box.col = "white", y.intersp = 2)
# Legend 2: line type encodes which group has the stronger absolute association.
plot.new()
legend("center", legend=c("CD abs. association stronger", "Control abs. association stronger"),
lty=c(1,2), col=c("black", "black"), ncol=2, cex=1, lwd = 2, box.col = "white")
|
# Bootstrap loader for renv (generated by renv::activate() and vendored with
# the project). Ensures the requested renv version is loaded at R startup,
# downloading and installing it into the project library when missing.
local({

# the requested version of renv
version <- "0.12.2"

# the project directory
project <- getwd()

# avoid recursion: bail out if a renv bootstrap is already in progress
if (!is.na(Sys.getenv("RENV_R_INITIALIZING", unset = NA)))
return(invisible(TRUE))

# signal that we're loading renv during R startup
Sys.setenv("RENV_R_INITIALIZING" = "true")
on.exit(Sys.unsetenv("RENV_R_INITIALIZING"), add = TRUE)

# signal that we've consented to use renv
options(renv.consent = TRUE)

# load the 'utils' package eagerly -- this ensures that renv shims, which
# mask 'utils' packages, will come first on the search path
library(utils, lib.loc = .Library)

# check to see if renv has already been loaded
if ("renv" %in% loadedNamespaces()) {

# if renv has already been loaded, and it's the requested version of renv,
# nothing to do
spec <- .getNamespaceInfo(.getNamespace("renv"), "spec")
if (identical(spec[["version"]], version))
return(invisible(TRUE))

# otherwise, unload and attempt to load the correct version of renv
unloadNamespace("renv")

}

# load bootstrap tools
# Download the requested renv version and install it into `library`,
# stopping with an error if either step fails. Repositories come from
# RENV_CONFIG_REPOS_OVERRIDE when set, otherwise from options("repos").
bootstrap <- function(version, library) {

# read repos (respecting override if set)
repos <- Sys.getenv("RENV_CONFIG_REPOS_OVERRIDE", unset = NA)
if (is.na(repos))
repos <- getOption("repos")

# fix up repos: substitute a real CRAN mirror for the "@CRAN@" placeholder.
# on.exit() re-applies `repos` when this function exits (the expression is
# evaluated lazily, so the fixed-up value is what gets re-applied).
on.exit(options(repos = repos), add = TRUE)
repos[repos == "@CRAN@"] <- "https://cloud.r-project.org"
options(repos = repos)

# attempt to download renv
tarball <- tryCatch(renv_bootstrap_download(version), error = identity)
if (inherits(tarball, "error"))
stop("failed to download renv ", version)

# now attempt to install
status <- tryCatch(renv_bootstrap_install(version, tarball, library), error = identity)
if (inherits(status, "error"))
stop("failed to install renv ", version)

}
# Download `url` to `destfile` via download.file() in binary mode, returning
# download.file()'s status code (0L on success).
renv_bootstrap_download_impl <- function(url, destfile) {

mode <- "wb"

# https://bugs.r-project.org/bugzilla/show_bug.cgi?id=17715
# workaround for the bug above: on Windows, "file:" URLs need "w+b" mode
fixup <-
Sys.info()[["sysname"]] == "Windows" &&
substring(url, 1L, 5L) == "file:"

if (fixup)
mode <- "w+b"

download.file(
url = url,
destfile = destfile,
mode = mode,
quiet = TRUE
)

}
# Download the requested renv version, trying each applicable download
# method in turn. Returns the path to the downloaded tarball on success;
# stops with an error when every method fails.
renv_bootstrap_download <- function(version) {

# if the renv version number has 4 components, assume it must
# be retrieved via github (CRAN releases use 3 components)
nv <- numeric_version(version)
components <- unclass(nv)[[1]]
methods <- if (length(components) == 4L) {
list(renv_bootstrap_download_github)
} else {
list(
renv_bootstrap_download_cran_latest,
renv_bootstrap_download_cran_archive
)
}

# accept the first method that yields an existing file path
for (method in methods) {
path <- tryCatch(method(version), error = identity)
if (is.character(path) && file.exists(path))
return(path)
}

stop("failed to download renv ", version)

}
# Try to download the requested renv version from the currently declared
# CRAN repositories. Returns the downloaded package path on success,
# FALSE on download failure; stops if the version is not listed at all.
renv_bootstrap_download_cran_latest <- function(version) {

# check for renv on CRAN matching this version
db <- as.data.frame(available.packages(), stringsAsFactors = FALSE)
entry <- db[db$Package %in% "renv" & db$Version %in% version, ]
if (nrow(entry) == 0) {
fmt <- "renv %s is not available from your declared package repositories"
stop(sprintf(fmt, version))
}

message("* Downloading renv ", version, " from CRAN ... ", appendLF = FALSE)

info <- tryCatch(
download.packages("renv", destdir = tempdir(), quiet = TRUE),
condition = identity
)

if (inherits(info, "condition")) {
message("FAILED")
return(FALSE)
}

message("OK")
# download.packages() returns a matrix; column 2 holds the file path
info[1, 2]

}
# Try to download the requested renv version from the CRAN archive area of
# each configured repository. Returns the tarball path on success, FALSE
# when every repository fails.
renv_bootstrap_download_cran_archive <- function(version) {

name <- sprintf("renv_%s.tar.gz", version)
repos <- getOption("repos")
urls <- file.path(repos, "src/contrib/Archive/renv", name)
destfile <- file.path(tempdir(), name)

message("* Downloading renv ", version, " from CRAN archive ... ", appendLF = FALSE)

for (url in urls) {

status <- tryCatch(
renv_bootstrap_download_impl(url, destfile),
condition = identity
)

# download.file() reports success with status 0L
if (identical(status, 0L)) {
message("OK")
return(destfile)
}

}

message("FAILED")
return(FALSE)

}
# Download the requested renv version from the GitHub tarball API.
# Honours RENV_BOOTSTRAP_FROM_GITHUB (any value other than "TRUE" disables
# this method) and, when a GITHUB_PAT is set, authenticates via curl or
# wget. Returns the tarball path on success, FALSE otherwise.
renv_bootstrap_download_github <- function(version) {

enabled <- Sys.getenv("RENV_BOOTSTRAP_FROM_GITHUB", unset = "TRUE")
if (!identical(enabled, "TRUE"))
return(FALSE)

# prepare download options: route the PAT through the download tool's
# extra arguments, restoring the prior options on exit
pat <- Sys.getenv("GITHUB_PAT")
if (nzchar(Sys.which("curl")) && nzchar(pat)) {
fmt <- "--location --fail --header \"Authorization: token %s\""
extra <- sprintf(fmt, pat)
saved <- options("download.file.method", "download.file.extra")
options(download.file.method = "curl", download.file.extra = extra)
on.exit(do.call(base::options, saved), add = TRUE)
} else if (nzchar(Sys.which("wget")) && nzchar(pat)) {
fmt <- "--header=\"Authorization: token %s\""
extra <- sprintf(fmt, pat)
saved <- options("download.file.method", "download.file.extra")
options(download.file.method = "wget", download.file.extra = extra)
on.exit(do.call(base::options, saved), add = TRUE)
}

message("* Downloading renv ", version, " from GitHub ... ", appendLF = FALSE)

url <- file.path("https://api.github.com/repos/rstudio/renv/tarball", version)
name <- sprintf("renv_%s.tar.gz", version)
destfile <- file.path(tempdir(), name)

status <- tryCatch(
renv_bootstrap_download_impl(url, destfile),
condition = identity
)

if (!identical(status, 0L)) {
message("FAILED")
return(FALSE)
}

message("Done!")
return(destfile)

}
# Install the downloaded renv tarball into `library` by running
# `R CMD INSTALL` in a separate process. On failure, the captured output
# is echoed to stderr. Returns the "status" attribute attached by
# system2() (absent when the command succeeded).
renv_bootstrap_install <- function(version, tarball, library) {

# attempt to install it into project library
message("* Installing renv ", version, " ... ", appendLF = FALSE)
dir.create(library, showWarnings = FALSE, recursive = TRUE)

# invoke using system2 so we can capture and report output
bin <- R.home("bin")
exe <- if (Sys.info()[["sysname"]] == "Windows") "R.exe" else "R"
r <- file.path(bin, exe)
args <- c("--vanilla", "CMD", "INSTALL", "-l", shQuote(library), shQuote(tarball))
output <- system2(r, args, stdout = TRUE, stderr = TRUE)
message("Done!")

# check for successful install
status <- attr(output, "status")
if (is.numeric(status) && !identical(status, 0L)) {
header <- "Error installing renv:"
lines <- paste(rep.int("=", nchar(header)), collapse = "")
text <- c(header, lines, output)
writeLines(text, con = stderr())
}

status

}
# Build the platform-specific path prefix under which renv stores
# libraries, e.g. "R-4.0/x86_64-pc-linux-gnu". Development builds of R
# get the SVN revision appended, and a user-supplied RENV_PATHS_PREFIX
# is prepended when present.
renv_bootstrap_prefix <- function() {

  # "R-<major>.<minor>" component for the running R version
  r_version <- paste(R.version$major, R.version$minor, sep = ".")
  version_part <- paste("R", numeric_version(r_version)[1, 1:2], sep = "-")

  # append the SVN revision for development versions of R, so that
  # platform-specific artefacts are never shared with released versions
  is_devel <-
    identical(R.version[["status"]], "Under development (unstable)") ||
    identical(R.version[["nickname"]], "Unsuffered Consequences")

  if (is_devel) {
    version_part <- paste(version_part, R.version[["svn rev"]], sep = "-r")
  }

  # version prefix followed by the platform string
  parts <- c(version_part, R.version$platform)

  # honour a user-provided prefix, placed first when set
  user_prefix <- Sys.getenv("RENV_PATHS_PREFIX")
  if (nzchar(user_prefix)) {
    parts <- c(user_prefix, parts)
  }

  paste(parts, collapse = "/")

}
# Resolve the root directory of the project's renv library.
# Precedence: an explicit RENV_PATHS_LIBRARY path, then a shared
# RENV_PATHS_LIBRARY_ROOT (one folder per project basename), then the
# default "renv/library" folder inside the project itself.
renv_bootstrap_library_root <- function(project) {

  # an explicit per-project library path wins outright
  library_path <- Sys.getenv("RENV_PATHS_LIBRARY", unset = NA)
  if (!is.na(library_path)) {
    return(library_path)
  }

  # a shared root hosts one library folder per project, keyed by basename
  root_path <- Sys.getenv("RENV_PATHS_LIBRARY_ROOT", unset = NA)
  if (!is.na(root_path)) {
    return(file.path(root_path, basename(project)))
  }

  # default: the library lives inside the project directory
  file.path(project, "renv/library")

}
# Check that the loaded renv version matches the version this script
# requests. Returns TRUE when they match; otherwise warns with
# instructions for reconciling the lockfile and returns FALSE.
renv_bootstrap_validate_version <- function(version) {

loadedversion <- utils::packageDescription("renv", fields = "Version")
if (version == loadedversion)
return(TRUE)

# assume four-component versions are from GitHub; three-component
# versions are from CRAN
components <- strsplit(loadedversion, "[.-]")[[1]]
remote <- if (length(components) == 4L)
paste("rstudio/renv", loadedversion, sep = "@")
else
paste("renv", loadedversion, sep = "@")

# explain both remediation paths: record the loaded version, or
# restore the requested one
fmt <- paste(
"renv %1$s was loaded from project library, but this project is configured to use renv %2$s.",
"Use `renv::record(\"%3$s\")` to record renv %1$s in the lockfile.",
"Use `renv::restore(packages = \"renv\")` to install renv %2$s into the project library.",
sep = "\n"
)

msg <- sprintf(fmt, loadedversion, version, remote)
warning(msg, call. = FALSE)

FALSE

}
# Attempt to load renv from the project library at `libpath`, then load
# the renv project itself. Returns TRUE on success, FALSE when renv is
# not available in that library.
renv_bootstrap_load <- function(project, libpath, version) {

# try to load renv from the project library
if (!requireNamespace("renv", lib.loc = libpath, quietly = TRUE))
return(FALSE)

# warn if the version of renv loaded does not match
renv_bootstrap_validate_version(version)

# load the project
renv::load(project)

TRUE

}
# construct path to library root
root <- renv_bootstrap_library_root(project)

# construct library prefix for platform
prefix <- renv_bootstrap_prefix()

# construct full libpath
libpath <- file.path(root, prefix)

# attempt to load renv from the existing project library
if (renv_bootstrap_load(project, libpath, version))
return(TRUE)

# load failed; attempt to bootstrap (download + install renv)
message("Bootstrapping renv ", version, " ...")
bootstrap(version, libpath)

# exit early if we're just testing bootstrap
if (!is.na(Sys.getenv("RENV_BOOTSTRAP_INSTALL_ONLY", unset = NA)))
return(TRUE)

# try again to load, now that renv should be installed
if (requireNamespace("renv", lib.loc = libpath, quietly = TRUE)) {
message("Successfully installed and loaded renv ", version, ".")
return(renv::load())
}

# failed to download or load renv; warn the user
msg <- c(
"Failed to find an renv installation: the project will not be loaded.",
"Use `renv::activate()` to re-initialize the project."
)

warning(paste(msg, collapse = "\n"), call. = FALSE)

})
| /makescore/renv/activate.R | permissive | mpinese/nimpress | R | false | false | 10,374 | r |
local({
# the requested version of renv
version <- "0.12.2"
# the project directory
project <- getwd()
# avoid recursion
if (!is.na(Sys.getenv("RENV_R_INITIALIZING", unset = NA)))
return(invisible(TRUE))
# signal that we're loading renv during R startup
Sys.setenv("RENV_R_INITIALIZING" = "true")
on.exit(Sys.unsetenv("RENV_R_INITIALIZING"), add = TRUE)
# signal that we've consented to use renv
options(renv.consent = TRUE)
# load the 'utils' package eagerly -- this ensures that renv shims, which
# mask 'utils' packages, will come first on the search path
library(utils, lib.loc = .Library)
# check to see if renv has already been loaded
if ("renv" %in% loadedNamespaces()) {
# if renv has already been loaded, and it's the requested version of renv,
# nothing to do
spec <- .getNamespaceInfo(.getNamespace("renv"), "spec")
if (identical(spec[["version"]], version))
return(invisible(TRUE))
# otherwise, unload and attempt to load the correct version of renv
unloadNamespace("renv")
}
# load bootstrap tools
bootstrap <- function(version, library) {
# read repos (respecting override if set)
repos <- Sys.getenv("RENV_CONFIG_REPOS_OVERRIDE", unset = NA)
if (is.na(repos))
repos <- getOption("repos")
# fix up repos
on.exit(options(repos = repos), add = TRUE)
repos[repos == "@CRAN@"] <- "https://cloud.r-project.org"
options(repos = repos)
# attempt to download renv
tarball <- tryCatch(renv_bootstrap_download(version), error = identity)
if (inherits(tarball, "error"))
stop("failed to download renv ", version)
# now attempt to install
status <- tryCatch(renv_bootstrap_install(version, tarball, library), error = identity)
if (inherits(status, "error"))
stop("failed to install renv ", version)
}
renv_bootstrap_download_impl <- function(url, destfile) {
mode <- "wb"
# https://bugs.r-project.org/bugzilla/show_bug.cgi?id=17715
fixup <-
Sys.info()[["sysname"]] == "Windows" &&
substring(url, 1L, 5L) == "file:"
if (fixup)
mode <- "w+b"
download.file(
url = url,
destfile = destfile,
mode = mode,
quiet = TRUE
)
}
renv_bootstrap_download <- function(version) {
# if the renv version number has 4 components, assume it must
# be retrieved via github
nv <- numeric_version(version)
components <- unclass(nv)[[1]]
methods <- if (length(components) == 4L) {
list(renv_bootstrap_download_github)
} else {
list(
renv_bootstrap_download_cran_latest,
renv_bootstrap_download_cran_archive
)
}
for (method in methods) {
path <- tryCatch(method(version), error = identity)
if (is.character(path) && file.exists(path))
return(path)
}
stop("failed to download renv ", version)
}
renv_bootstrap_download_cran_latest <- function(version) {
# check for renv on CRAN matching this version
db <- as.data.frame(available.packages(), stringsAsFactors = FALSE)
entry <- db[db$Package %in% "renv" & db$Version %in% version, ]
if (nrow(entry) == 0) {
fmt <- "renv %s is not available from your declared package repositories"
stop(sprintf(fmt, version))
}
message("* Downloading renv ", version, " from CRAN ... ", appendLF = FALSE)
info <- tryCatch(
download.packages("renv", destdir = tempdir(), quiet = TRUE),
condition = identity
)
if (inherits(info, "condition")) {
message("FAILED")
return(FALSE)
}
message("OK")
info[1, 2]
}
renv_bootstrap_download_cran_archive <- function(version) {
name <- sprintf("renv_%s.tar.gz", version)
repos <- getOption("repos")
urls <- file.path(repos, "src/contrib/Archive/renv", name)
destfile <- file.path(tempdir(), name)
message("* Downloading renv ", version, " from CRAN archive ... ", appendLF = FALSE)
for (url in urls) {
status <- tryCatch(
renv_bootstrap_download_impl(url, destfile),
condition = identity
)
if (identical(status, 0L)) {
message("OK")
return(destfile)
}
}
message("FAILED")
return(FALSE)
}
renv_bootstrap_download_github <- function(version) {
enabled <- Sys.getenv("RENV_BOOTSTRAP_FROM_GITHUB", unset = "TRUE")
if (!identical(enabled, "TRUE"))
return(FALSE)
# prepare download options
pat <- Sys.getenv("GITHUB_PAT")
if (nzchar(Sys.which("curl")) && nzchar(pat)) {
fmt <- "--location --fail --header \"Authorization: token %s\""
extra <- sprintf(fmt, pat)
saved <- options("download.file.method", "download.file.extra")
options(download.file.method = "curl", download.file.extra = extra)
on.exit(do.call(base::options, saved), add = TRUE)
} else if (nzchar(Sys.which("wget")) && nzchar(pat)) {
fmt <- "--header=\"Authorization: token %s\""
extra <- sprintf(fmt, pat)
saved <- options("download.file.method", "download.file.extra")
options(download.file.method = "wget", download.file.extra = extra)
on.exit(do.call(base::options, saved), add = TRUE)
}
message("* Downloading renv ", version, " from GitHub ... ", appendLF = FALSE)
url <- file.path("https://api.github.com/repos/rstudio/renv/tarball", version)
name <- sprintf("renv_%s.tar.gz", version)
destfile <- file.path(tempdir(), name)
status <- tryCatch(
renv_bootstrap_download_impl(url, destfile),
condition = identity
)
if (!identical(status, 0L)) {
message("FAILED")
return(FALSE)
}
message("Done!")
return(destfile)
}
renv_bootstrap_install <- function(version, tarball, library) {
# attempt to install it into project library
message("* Installing renv ", version, " ... ", appendLF = FALSE)
dir.create(library, showWarnings = FALSE, recursive = TRUE)
# invoke using system2 so we can capture and report output
bin <- R.home("bin")
exe <- if (Sys.info()[["sysname"]] == "Windows") "R.exe" else "R"
r <- file.path(bin, exe)
args <- c("--vanilla", "CMD", "INSTALL", "-l", shQuote(library), shQuote(tarball))
output <- system2(r, args, stdout = TRUE, stderr = TRUE)
message("Done!")
# check for successful install
status <- attr(output, "status")
if (is.numeric(status) && !identical(status, 0L)) {
header <- "Error installing renv:"
lines <- paste(rep.int("=", nchar(header)), collapse = "")
text <- c(header, lines, output)
writeLines(text, con = stderr())
}
status
}
renv_bootstrap_prefix <- function() {
# construct version prefix
version <- paste(R.version$major, R.version$minor, sep = ".")
prefix <- paste("R", numeric_version(version)[1, 1:2], sep = "-")
# include SVN revision for development versions of R
# (to avoid sharing platform-specific artefacts with released versions of R)
devel <-
identical(R.version[["status"]], "Under development (unstable)") ||
identical(R.version[["nickname"]], "Unsuffered Consequences")
if (devel)
prefix <- paste(prefix, R.version[["svn rev"]], sep = "-r")
# build list of path components
components <- c(prefix, R.version$platform)
# include prefix if provided by user
prefix <- Sys.getenv("RENV_PATHS_PREFIX")
if (nzchar(prefix))
components <- c(prefix, components)
# build prefix
paste(components, collapse = "/")
}
renv_bootstrap_library_root <- function(project) {
path <- Sys.getenv("RENV_PATHS_LIBRARY", unset = NA)
if (!is.na(path))
return(path)
path <- Sys.getenv("RENV_PATHS_LIBRARY_ROOT", unset = NA)
if (!is.na(path))
return(file.path(path, basename(project)))
file.path(project, "renv/library")
}
renv_bootstrap_validate_version <- function(version) {
loadedversion <- utils::packageDescription("renv", fields = "Version")
if (version == loadedversion)
return(TRUE)
# assume four-component versions are from GitHub; three-component
# versions are from CRAN
components <- strsplit(loadedversion, "[.-]")[[1]]
remote <- if (length(components) == 4L)
paste("rstudio/renv", loadedversion, sep = "@")
else
paste("renv", loadedversion, sep = "@")
fmt <- paste(
"renv %1$s was loaded from project library, but this project is configured to use renv %2$s.",
"Use `renv::record(\"%3$s\")` to record renv %1$s in the lockfile.",
"Use `renv::restore(packages = \"renv\")` to install renv %2$s into the project library.",
sep = "\n"
)
msg <- sprintf(fmt, loadedversion, version, remote)
warning(msg, call. = FALSE)
FALSE
}
renv_bootstrap_load <- function(project, libpath, version) {
# try to load renv from the project library
if (!requireNamespace("renv", lib.loc = libpath, quietly = TRUE))
return(FALSE)
# warn if the version of renv loaded does not match
renv_bootstrap_validate_version(version)
# load the project
renv::load(project)
TRUE
}
# construct path to library root
root <- renv_bootstrap_library_root(project)
# construct library prefix for platform
prefix <- renv_bootstrap_prefix()
# construct full libpath
libpath <- file.path(root, prefix)
# attempt to load
if (renv_bootstrap_load(project, libpath, version))
return(TRUE)
# load failed; attempt to bootstrap
message("Bootstrapping renv ", version, " ...")
bootstrap(version, libpath)
# exit early if we're just testing bootstrap
if (!is.na(Sys.getenv("RENV_BOOTSTRAP_INSTALL_ONLY", unset = NA)))
return(TRUE)
# try again to load
if (requireNamespace("renv", lib.loc = libpath, quietly = TRUE)) {
message("Successfully installed and loaded renv ", version, ".")
return(renv::load())
}
# failed to download or load renv; warn the user
msg <- c(
"Failed to find an renv installation: the project will not be loaded.",
"Use `renv::activate()` to re-initialize the project."
)
warning(paste(msg, collapse = "\n"), call. = FALSE)
})
|
source("UI/ui_overview.R", local = TRUE)
source("UI/ui_about.R", local = TRUE)
ui <- fluidPage(
title = "COVID-19 Dashboard",
tags$style(type = "text/css", ".container-fluid {padding-left: 0px; padding-right: 0px !important;}"),
tags$style(type = "text/css", ".navbar {margin-bottom: 0px; background-color: #8B0000}"),
tags$style(type = "text/css", ".content {padding: 0px;}"),
tags$style(type = "text/css", ".row {margin-left: 0px; margin-right: 0px;}"),
tags$style(HTML(".col-sm-12 { padding: 5px; margin-bottom: -15px; }")),
tags$style(HTML(".col-sm-6 { padding: 5px; margin-bottom: -15px; }")),
navbarPage(
title = div("COVID-19 Dashboard", style = "padding-left: 10px"),
inverse = TRUE,
collapsible = TRUE,
fluid = TRUE,
tabPanel("Overview", page_overview, value = "page-overview"),
tabPanel("About", page_about, value = "page-about"),
tags$script(HTML("var header = $('.navbar > .container-fluid');
console.log(header)")
)
)
) | /ui.R | no_license | xfinalangelx/COVID-19-Dashboard | R | false | false | 1,000 | r | source("UI/ui_overview.R", local = TRUE)
# Load the About tab's UI definition (provides `page_about`; `page_overview`
# is sourced from UI/ui_overview.R just above this line).
source("UI/ui_about.R", local = TRUE)

# Top-level Shiny UI: a fluid page hosting a dark-red navbar with two tabs.
ui <- fluidPage(
title = "COVID-19 Dashboard",
# Inline CSS tweaks: strip default paddings/margins and recolour the navbar.
tags$style(type = "text/css", ".container-fluid {padding-left: 0px; padding-right: 0px !important;}"),
tags$style(type = "text/css", ".navbar {margin-bottom: 0px; background-color: #8B0000}"),
tags$style(type = "text/css", ".content {padding: 0px;}"),
tags$style(type = "text/css", ".row {margin-left: 0px; margin-right: 0px;}"),
tags$style(HTML(".col-sm-12 { padding: 5px; margin-bottom: -15px; }")),
tags$style(HTML(".col-sm-6 { padding: 5px; margin-bottom: -15px; }")),
navbarPage(
title = div("COVID-19 Dashboard", style = "padding-left: 10px"),
inverse = TRUE,
collapsible = TRUE,
fluid = TRUE,
tabPanel("Overview", page_overview, value = "page-overview"),
tabPanel("About", page_about, value = "page-about"),
# Debug helper: logs the navbar header element to the browser console.
tags$script(HTML("var header = $('.navbar > .container-fluid');
console.log(header)")
)
)
)
) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aiplatform_functions.R
\name{projects.locations.tensorboards.get}
\alias{projects.locations.tensorboards.get}
\title{Gets a Tensorboard.}
\usage{
projects.locations.tensorboards.get(name)
}
\arguments{
\item{name}{Required}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
\item https://www.googleapis.com/auth/cloud-platform.read-only
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/cloud-platform.read-only))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://cloud.google.com/vertex-ai/}{Google Documentation}
}
| /googleaiplatformv1.auto/man/projects.locations.tensorboards.get.Rd | no_license | justinjm/autoGoogleAPI | R | false | true | 923 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aiplatform_functions.R
\name{projects.locations.tensorboards.get}
\alias{projects.locations.tensorboards.get}
\title{Gets a Tensorboard.}
\usage{
projects.locations.tensorboards.get(name)
}
\arguments{
\item{name}{Required}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
\item https://www.googleapis.com/auth/cloud-platform.read-only
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/cloud-platform.read-only))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://cloud.google.com/vertex-ai/}{Google Documentation}
}
|
# Exploration of the datasets::beaver2 body-temperature series:
# build a proper datetime axis and plot temperature coloured by activity.
library(dataverse)  # NOTE: not used below; kept to preserve existing loads
library(lubridate)  # make_datetime(), days()
library(dplyr)      # %>% and mutate() (previously missing -- script errored)
library(ggplot2)    # ggplot()/geom_line() (previously missing)

# See ?datasets::beaver2 for a description of the measurements.
b <- datasets::beaver2

# `time` encodes clock time as HHMM (e.g. 2130) and `day` is the day index;
# combine them into a POSIXct datetime for a continuous x-axis.
b <- b %>%
  mutate(date = make_datetime(year = 1990, hour = time %/% 100, min = time %% 100)) %>%
  mutate(date = date + days(day))

# Minutes elapsed since the first observation, paired with temperature.
# (Replaces the previous `mins = mins`, which referenced an undefined name.)
obs <- tibble(
  mins = as.numeric(difftime(b$date, min(b$date), units = "mins")),
  temp = b$temp
)

# Temperature over time, coloured by the activity indicator.
ggplot(b, aes(x = date, y = temp, color = activ)) + geom_line()

b
| /Exercise07/R/Task07/kai.R | no_license | kai-dat18/advanced_scripting_github | R | false | false | 320 | r | library(dataverse)
library(lubridate)
# NOTE(review): dplyr/ggplot2/tibble are used below but never attached;
# as written this script errors unless they are loaded elsewhere.
?datasets::beaver2
b <- datasets::beaver2
# `time` encodes clock time as HHMM; `day` is the day index.
b <- b %>%
mutate(date = make_datetime(year=1990, hour=time %/% 100, min=time %% 100)) %>%
mutate(date = date + days(day))
# NOTE(review): `mins` is undefined here, so this line fails at runtime;
# it was presumably meant to be minutes elapsed, derived from `date`.
data <- tibble(mins = mins, temp = b$temp)
ggplot(b, aes(x = date, y = temp, color=activ)) + geom_line()
b
|
# Map of Oregon counties highlighting where tribes are present, built from
# the "Tribes" sheet of the by-county workbook.
# Packages ----------------------------------------------------------------
library(tidyverse)
library(ggrepel)
library(readxl)
library(scales)
library(extrafont)
library(janitor)
library(tigris)
library(here)
# Load stuff --------------------------------------------------------------
# Provides tfff_* colours/themes and dk_oregon_counties_geodata().
source(here("R", "tfff-themes.R"))
source(here("R", "functions.R"))
# Get Data ----------------------------------------------------------------
oregon_counties_geodata <- dk_oregon_counties_geodata()
# Wide sheet (one column per tribe) reshaped to long, keeping only
# county/tribe pairs that are marked present, then joined to geometry.
# NOTE(review): gather() is superseded; pivot_longer() is the modern form.
tribes <- read_excel(here("data", "obtn-by-county.xlsx"),
sheet = "Tribes") %>%
clean_names() %>%
gather(key = "tribe", value = "present", -geography) %>%
drop_na(present) %>%
mutate(present = "Y") %>%
right_join(oregon_counties_geodata, by = c("geography" = "name"))
# Plot --------------------------------------------------------------------
# Base layer: all counties in light gray; overlay: tribe counties in green.
ggplot(oregon_counties_geodata) +
geom_sf(fill = tfff_light_gray,
color = "white",
size = .25) +
geom_sf(data = tribes,
fill = tfff_dark_green) +
coord_sf(datum = NA) +
scale_fill_manual(values = tfff_choropleth_colors) +
tfff_map_theme +
theme(legend.position = "bottom")
| /R/tribes.R | no_license | dgkeyes/obtn-2019 | R | false | false | 1,243 | r | # Packages ----------------------------------------------------------------
library(tidyverse)
library(ggrepel)
library(readxl)
library(scales)
library(extrafont)
library(janitor)
library(tigris)
library(here)
# Load stuff --------------------------------------------------------------
source(here("R", "tfff-themes.R"))
source(here("R", "functions.R"))
# Get Data ----------------------------------------------------------------
oregon_counties_geodata <- dk_oregon_counties_geodata()
tribes <- read_excel(here("data", "obtn-by-county.xlsx"),
sheet = "Tribes") %>%
clean_names() %>%
gather(key = "tribe", value = "present", -geography) %>%
drop_na(present) %>%
mutate(present = "Y") %>%
right_join(oregon_counties_geodata, by = c("geography" = "name"))
# Plot --------------------------------------------------------------------
ggplot(oregon_counties_geodata) +
geom_sf(fill = tfff_light_gray,
color = "white",
size = .25) +
geom_sf(data = tribes,
fill = tfff_dark_green) +
coord_sf(datum = NA) +
scale_fill_manual(values = tfff_choropleth_colors) +
tfff_map_theme +
theme(legend.position = "bottom")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/happiness.R
\docType{data}
\name{happiness}
\alias{happiness}
\title{Happiness Dataset}
\format{
A data frame with 460 rows and 11 variables:
\describe{
\item{\code{ID}}{character. A unique identifier.}
\item{\code{Date}}{date. Date of the interview.}
\item{\code{Sex}}{factor. Sex coded as \code{Male} or \code{Female}.}
\item{\code{Race}}{factor. Race coded as an 8-level factor.}
\item{\code{Age}}{integer. Age in years.}
\item{\code{Education}}{factor. Education coded as a 13-level factor.}
\item{\code{Income}}{double. Annual income in US dollars.}
\item{\code{IQ}}{double. Adult intelligence quotient. This
variable has a large amount of missing data.}
\item{\code{Zip}}{character. USPS Zip code.}
\item{\code{Children}}{integer. Number of children.}
\item{\code{Happy}}{factor. Agreement with the statement
"I am happy most of the time", coded as \code{Strongly Disagree} ,
\code{Disagree}, \code{Neutral}, \code{Agree}, or
\code{Strongly Agree}.}
}
}
\source{
The data were randomly generated using functions from the
\href{https://cran.r-project.org/web/packages/wakefield/index.html}{wakefield}
package.
}
\usage{
happiness
}
\description{
A data frame containing a happiness survey and demographic data.
These data are fictitious.
}
\keyword{datasets}
| /man/happiness.Rd | permissive | Rkabacoff/edatools | R | false | true | 1,341 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/happiness.R
\docType{data}
\name{happiness}
\alias{happiness}
\title{Happiness Dataset}
\format{
A data frame with 460 rows and 11 variables:
\describe{
\item{\code{ID}}{character. A unique identifier.}
\item{\code{Date}}{date. Date of the interview.}
\item{\code{Sex}}{factor. Sex coded as \code{Male} or \code{Female}.}
\item{\code{Race}}{factor. Race coded as an 8-level factor.}
\item{\code{Age}}{integer. Age in years.}
\item{\code{Education}}{factor. Education coded as a 13-level factor.}
\item{\code{Income}}{double. Annual income in US dollars.}
\item{\code{IQ}}{double. Adult intelligence quotient. This
variable has a large amount of missing data.}
\item{\code{Zip}}{character. USPS Zip code.}
\item{\code{Children}}{integer. Number of children.}
\item{\code{Happy}}{factor. Agreement with the statement
"I am happy most of the time", coded as \code{Strongly Disagree} ,
\code{Disagree}, \code{Neutral}, \code{Agree}, or
\code{Strongly Agree}.}
}
}
\source{
The data were randomly generated using functions from the
\href{https://cran.r-project.org/web/packages/wakefield/index.html}{wakefield}
package.
}
\usage{
happiness
}
\description{
A data frame containing a happiness survey and demographic data.
These data are fictitious.
}
\keyword{datasets}
|
# ====================================================================================================
# Author: Aaron
# Function: WGCNA for caudate
# Several parameters of blockwiseModules can be changed to get better performance
# 1. high minKMEtostay will make module smaller and module genes with higher KME in each module, lowKMEtostay may make modult split
# 2. unsign network will make smaller modules, genes in singed network corralated with status in the same direction
# 3. bicor is more robust than pearson, but bicor may not suit, choose pearson
# Version: 1.0
# Date: Oct 15, 2018
# ====================================================================================================
# NOTE(review): rm(list=ls()) wipes the user's workspace as a side effect;
# prefer running this script in a fresh R session instead.
rm(list=ls())
library(DESeq2)
library(WGCNA)
# Allow WGCNA to use multiple threads for network calculations.
enableWGCNAThreads()
options(stringsAsFactors = F)
# Use r-log transformed data from DESeq2, covariates such as SEX, AGE, BMI, RIN were also regress out by a linear regression
tmethod <- "covar_prepare"
tpath <- paste0("/data/MyProgram/Final_diabrain/7.interdata/6_WGCNA/", tmethod)
# Gene-ID-to-symbol mapping (loaded here; not used in this section).
id2symbol <- read.table("/data/MyProgram/Final_diabrain/1.clean/id2symbol.tab", stringsAsFactors = F, header = T)
setwd(tpath)
#-------------------------------------------------------------------------------------------------
# I. Check data
#-------------------------------------------------------------------------------------------------
# specify which tissue to research
mytissue <- "Brain_Caudate"
fname <- dir("./")
tname <- sub("\\.RData", "", fname)
#for (mytissue in tname) {
# The .RData file is expected to supply `mycol` (sample annotations) and
# `dds_residual` (residual expression matrix) -- TODO confirm contents.
load(paste0(mytissue, ".RData"))
# Drop columns 1, 2 and 9 of the annotation table (presumably identifier
# columns -- TODO confirm), keeping trait columns converted to numeric.
datTraits <- mycol[, c(-1, -2, -9)]
datTraits$SEX <- as.numeric(as.character(datTraits$SEX))
datTraits$RACE <- as.numeric((as.character(datTraits$RACE)))
datTraits$DIABETES <- as.numeric(as.character(datTraits$DIABETES))
rownames(datTraits) <- make.names(mycol$SAMPID)
# WGCNA expects samples in rows and genes in columns, hence the transpose.
datExpr = as.data.frame(t(dds_residual))
# NOTE(review): this sanity check's result is discarded; consider
# stopifnot(all(rownames(datExpr) == rownames(datTraits))).
all(rownames(datExpr)==rownames(datTraits))
nGenes <- dim(datExpr)[2]
nSamples <- dim(datExpr)[1]
#-------------------------------------------------------------------------------------------------
# II. Pick Soft Threshold
#-------------------------------------------------------------------------------------------------
# Choose power
powers = c(c(1:10), seq(from = 12, to=20, by=2))
# Call the network topology analysis function
system.time(sft <- pickSoftThreshold(datExpr, powerVector = powers, verbose = 5, blockSize = 30000, RsquaredCut = 0.90))
# Plot the results:
#sizeGrWindow(9, 5)
pdf(file = paste0("../../../4.plots/WGCNA_Caudate/softpower_",mytissue,".pdf"), width = 9, height = 5);
par(mfrow = c(1,2));
cex1 = 0.9;
# Scale-free topology fit index as a function of the soft-thresholding power
plot(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
xlab="Soft Threshold (power)",ylab="Scale Free Topology Model Fit,signed R^2",type="n",
main = paste("Scale independence"));
text(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
labels=powers,cex=cex1,col="red");
# this line corresponds to using an R^2 cut-off of h
abline(h=0.90,col="red")
# Mean connectivity as a function of the soft-thresholding power
plot(sft$fitIndices[,1], sft$fitIndices[,5],
xlab="Soft Threshold (power)",ylab="Mean Connectivity", type="n",
main = paste("Mean connectivity "))
text(sft$fitIndices[,1], sft$fitIndices[,5], labels=powers, cex=cex1,col="red")
dev.off()
#-------------------------------------------------------------------------------------------------
# III. Net Construction
#-------------------------------------------------------------------------------------------------
collectGarbage();
spower <- sft$powerEstimate
if (is.na(spower)) {
spower <- sft$fitIndices[sft$fitIndices[,2]==max(sft$fitIndices[,2]), 1]
}
aconnect <- softConnectivity(datExpr, type = "signed", power = spower, blockSize = 30000)
# One-step
bwnet <- blockwiseModules(datExpr, maxBlockSize = 30000,
power = spower, TOMType = "unsigned",
minModuleSize = 40, reassignThreshold = 0,
mergeCutHeight = 0.15, numericLabels = TRUE,
corType = "pearson", networkType = "signed",
minKMEtoStay = 0.7,
saveTOMs = FALSE, verbose = 3)
table(bwnet$colors)
bwnetLabels <- bwnet$colors
bwnetColors <- labels2colors(bwnetLabels)
bw_label2color <- data.frame(Labels = bwnetLabels, Colors = bwnetColors)
bw_label2color <- unique(bw_label2color)
write.table(bw_label2color, file = paste0("../", mytissue, "_bw_label2color.txt"), quote = F, sep = "\t", row.names = F)
pdf(file = paste0("../../../4.plots/WGCNA_Caudate/DendroColor_",mytissue,".pdf"), width = 12, height = 8);
plotDendroAndColors(bwnet$dendrograms[[1]], bwnetColors,
sub("Brain_", "", mytissue), dendroLabels = FALSE, hang = 0.03,
main = paste0("Gene dendrogram and module colors of ", sub("Brain_", "", mytissue)),
addGuide = TRUE, guideHang = 0.05)
dev.off()
mod2gene <- data.frame(ModID = bwnetColors, geneID = colnames(datExpr), stringsAsFactors = F)
mod2gene <- mod2gene[order(mod2gene$ModID), ]
mod2gene <- data.frame(Mod = paste0("ME", mod2gene$ModID), mod2gene, stringsAsFactors = F)
mod2gene <- mod2gene[, -2]
tmp <- dir("../../../2.results/Cov_optimal/result05/res/", full.names = T)
DEgene <- read.table(tmp[grep(mytissue, tmp)], header = T, stringsAsFactors = F)
togene <- data.frame(Mod = 'TotalGene', geneID = DEgene$Gene)
upgene <- data.frame(Mod = "UpGene", geneID = DEgene$Gene[DEgene$log2FoldChange > 0])
downgene <- data.frame(Mod = "DownGene", geneID = DEgene$Gene[DEgene$log2FoldChange < 0])
mod2gene <- rbind(mod2gene, togene, upgene, downgene)
mod2gene$geneID <- id2symbol$Description[match(mod2gene$geneID, id2symbol$Name)]
save(mod2gene, bwnet, datExpr, datTraits, bwnetLabels, bwnetColors, nGenes, nSamples, bw_label2color, file = paste0("../", mytissue, "_covar_WGCNAres_v1.0.RData"))
#}
| /6_WGCNA/6.3_WGCNA_covar_caudate.R | no_license | ZedekiahZhou/T2D_Brain | R | false | false | 6,385 | r |
# ====================================================================================================
# Author: Aaron
# Function: WGCNA (weighted gene co-expression network analysis) for the caudate tissue
# Several parameters of blockwiseModules can be changed to get better performance:
# 1. A high minKMEtoStay makes modules smaller, keeping only genes with higher KME in each
#    module; a low minKMEtoStay may make modules split.
# 2. An unsigned network makes smaller modules; genes in a signed network correlate with the
#    status in the same direction.
# 3. bicor is more robust than Pearson, but bicor may not suit these data, so Pearson is chosen.
# Version: 1.0
# Date: Oct 15, 2018
# ====================================================================================================
rm(list=ls())
library(DESeq2)
library(WGCNA)
enableWGCNAThreads()
options(stringsAsFactors = F)
# Use r-log transformed data from DESeq2; covariates such as SEX, AGE, BMI and RIN were
# regressed out beforehand by a linear regression (the "covar_prepare" step).
tmethod <- "covar_prepare"
tpath <- paste0("/data/MyProgram/Final_diabrain/7.interdata/6_WGCNA/", tmethod)
# Gene ID to symbol mapping table (expected columns: Name, Description)
id2symbol <- read.table("/data/MyProgram/Final_diabrain/1.clean/id2symbol.tab", stringsAsFactors = F, header = T)
setwd(tpath)
#-------------------------------------------------------------------------------------------------
# I. Check data
#-------------------------------------------------------------------------------------------------
# specify which tissue to research
mytissue <- "Brain_Caudate"
fname <- dir("./")
tname <- sub("\\.RData", "", fname)
# The loop over all tissues is disabled; only the single tissue above is processed.
#for (mytissue in tname) {
# Loads the objects used below -- presumably mycol (sample annotation) and
# dds_residual (residual expression matrix); confirm against the .RData content.
load(paste0(mytissue, ".RData"))
# Drop identifier columns from the annotation, keeping trait columns only
datTraits <- mycol[, c(-1, -2, -9)]
datTraits$SEX <- as.numeric(as.character(datTraits$SEX))
datTraits$RACE <- as.numeric((as.character(datTraits$RACE)))
datTraits$DIABETES <- as.numeric(as.character(datTraits$DIABETES))
rownames(datTraits) <- make.names(mycol$SAMPID)
# WGCNA expects samples in rows and genes in columns, hence the transpose
datExpr = as.data.frame(t(dds_residual))
# Sanity check: expression and trait tables must be aligned by sample
all(rownames(datExpr)==rownames(datTraits))
nGenes <- dim(datExpr)[2]
nSamples <- dim(datExpr)[1]
#-------------------------------------------------------------------------------------------------
# II. Pick Soft Threshold
#-------------------------------------------------------------------------------------------------
# Candidate soft-thresholding powers: 1..10 plus even values 12..20
powers = c(c(1:10), seq(from = 12, to=20, by=2))
# Call the network topology analysis function
system.time(sft <- pickSoftThreshold(datExpr, powerVector = powers, verbose = 5, blockSize = 30000, RsquaredCut = 0.90))
# Plot the results:
#sizeGrWindow(9, 5)
pdf(file = paste0("../../../4.plots/WGCNA_Caudate/softpower_",mytissue,".pdf"), width = 9, height = 5);
par(mfrow = c(1,2));
cex1 = 0.9;
# Scale-free topology fit index as a function of the soft-thresholding power
plot(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
xlab="Soft Threshold (power)",ylab="Scale Free Topology Model Fit,signed R^2",type="n",
main = paste("Scale independence"));
text(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
labels=powers,cex=cex1,col="red");
# this line corresponds to using an R^2 cut-off of h
abline(h=0.90,col="red")
# Mean connectivity as a function of the soft-thresholding power
plot(sft$fitIndices[,1], sft$fitIndices[,5],
xlab="Soft Threshold (power)",ylab="Mean Connectivity", type="n",
main = paste("Mean connectivity "))
text(sft$fitIndices[,1], sft$fitIndices[,5], labels=powers, cex=cex1,col="red")
dev.off()
#-------------------------------------------------------------------------------------------------
# III. Net Construction
#-------------------------------------------------------------------------------------------------
collectGarbage();
spower <- sft$powerEstimate
# If no power reached the R^2 cut-off, fall back to the power with the best fit
if (is.na(spower)) {
spower <- sft$fitIndices[sft$fitIndices[,2]==max(sft$fitIndices[,2]), 1]
}
aconnect <- softConnectivity(datExpr, type = "signed", power = spower, blockSize = 30000)
# One-step automatic network construction and module detection
bwnet <- blockwiseModules(datExpr, maxBlockSize = 30000,
power = spower, TOMType = "unsigned",
minModuleSize = 40, reassignThreshold = 0,
mergeCutHeight = 0.15, numericLabels = TRUE,
corType = "pearson", networkType = "signed",
minKMEtoStay = 0.7,
saveTOMs = FALSE, verbose = 3)
table(bwnet$colors)
# Translate numeric module labels into colour names and save the mapping
bwnetLabels <- bwnet$colors
bwnetColors <- labels2colors(bwnetLabels)
bw_label2color <- data.frame(Labels = bwnetLabels, Colors = bwnetColors)
bw_label2color <- unique(bw_label2color)
write.table(bw_label2color, file = paste0("../", mytissue, "_bw_label2color.txt"), quote = F, sep = "\t", row.names = F)
pdf(file = paste0("../../../4.plots/WGCNA_Caudate/DendroColor_",mytissue,".pdf"), width = 12, height = 8);
plotDendroAndColors(bwnet$dendrograms[[1]], bwnetColors,
sub("Brain_", "", mytissue), dendroLabels = FALSE, hang = 0.03,
main = paste0("Gene dendrogram and module colors of ", sub("Brain_", "", mytissue)),
addGuide = TRUE, guideHang = 0.05)
dev.off()
# Build the module-to-gene table and append DE gene sets from the DESeq2 results
mod2gene <- data.frame(ModID = bwnetColors, geneID = colnames(datExpr), stringsAsFactors = F)
mod2gene <- mod2gene[order(mod2gene$ModID), ]
mod2gene <- data.frame(Mod = paste0("ME", mod2gene$ModID), mod2gene, stringsAsFactors = F)
mod2gene <- mod2gene[, -2]
tmp <- dir("../../../2.results/Cov_optimal/result05/res/", full.names = T)
DEgene <- read.table(tmp[grep(mytissue, tmp)], header = T, stringsAsFactors = F)
togene <- data.frame(Mod = 'TotalGene', geneID = DEgene$Gene)
upgene <- data.frame(Mod = "UpGene", geneID = DEgene$Gene[DEgene$log2FoldChange > 0])
downgene <- data.frame(Mod = "DownGene", geneID = DEgene$Gene[DEgene$log2FoldChange < 0])
mod2gene <- rbind(mod2gene, togene, upgene, downgene)
# Map gene IDs to gene symbols before saving all results
mod2gene$geneID <- id2symbol$Description[match(mod2gene$geneID, id2symbol$Name)]
save(mod2gene, bwnet, datExpr, datTraits, bwnetLabels, bwnetColors, nGenes, nSamples, bw_label2color, file = paste0("../", mytissue, "_covar_WGCNAres_v1.0.RData"))
#}
|
library(shiny)
# UI definition for the "Next Word Predictor" Shiny app: the sidebar takes a
# free-text sentence (input id "text1") with a submit button; the main panel
# displays the text output "text1" produced by the server.
shinyUI(fluidPage(
titlePanel("Next Word Predictor App"),
sidebarLayout(
sidebarPanel(
h5('Start entering your sentence. Please enter space after last word and wait for your results.'),
textInput(inputId="text1", label = "Your Input", "This is a beautiful "),
submitButton('Submit')
),
mainPanel(
h5('Your Possible Next Word'),
textOutput('text1')
)
)))
| /ui.R | no_license | GauravTejwani/capstoneprojectapp | R | false | false | 403 | r | library(shiny)
# UI definition for the "Next Word Predictor" Shiny app: the sidebar takes a
# free-text sentence (input id "text1") with a submit button; the main panel
# displays the text output "text1" produced by the server.
shinyUI(fluidPage(
titlePanel("Next Word Predictor App"),
sidebarLayout(
sidebarPanel(
h5('Start entering your sentence. Please enter space after last word and wait for your results.'),
textInput(inputId="text1", label = "Your Input", "This is a beautiful "),
submitButton('Submit')
),
mainPanel(
h5('Your Possible Next Word'),
textOutput('text1')
)
)))
|
#' Error measures
#'
#' Functions allow to calculate different types of errors for point and
#' interval predictions:
#' \enumerate{
#' \item MAE - Mean Absolute Error,
#' \item MSE - Mean Squared Error,
#' \item MRE - Mean Root Error (Kourentzes, 2014),
#' \item MIS - Mean Interval Score (Gneiting & Raftery, 2007),
#' \item MPE - Mean Percentage Error,
#' \item MAPE - Mean Absolute Percentage Error (See Svetunkov, 2017 for
#' the critique),
#' \item MASE - Mean Absolute Scaled Error (Hyndman & Koehler, 2006)),
#' \item rMAE - Relative Mean Absolute Error (Davydenko & Fildes, 2013),
#' \item rRMSE - Relative Root Mean Squared Error,
#' \item rAME - Relative Absolute Mean Error,
#' \item rMIS - Relative Mean Interval Score,
#' \item sMSE - Scaled Mean Squared Error (Petropoulos & Kourentzes, 2015),
#' \item sPIS - Scaled Periods-In-Stock (Wallstrom & Segerstedt, 2010),
#' \item sCE - Scaled Cumulative Error,
#' \item sMIS - Scaled Mean Interval Score.
#' }
#'
#' In case of \code{sMSE}, \code{scale} needs to be a squared value. Typical
#' one -- squared mean value of in-sample actuals.
#'
#' If all the measures are needed, then \link[greybox]{measures} function
#' can help.
#'
#' There are several other measures, see details of \link[greybox]{pinball}
#' and \link[greybox]{hm}.
#'
#' @template author
#'
#' @aliases Errors
#' @param actual The vector or matrix of actual values.
#' @param forecast The vector or matrix of forecasts values.
#' @param lower The lower bound of the prediction interval.
#' @param upper The upper bound of the prediction interval.
#' @param scale The value that should be used in the denominator of MASE. Can
#' be anything but advised values are: mean absolute deviation of in-sample one
#' step ahead Naive error or mean absolute value of the in-sample actuals.
#' @param benchmark The vector or matrix of the forecasts of the benchmark
#' model.
#' @param benchmarkLower The lower bound of the prediction interval of the
#' benchmark model.
#' @param benchmarkUpper The upper bound of the prediction interval of the
#' benchmark model.
#' @param level The confidence level of the constructed interval.
#' @return All the functions return the scalar value.
#' @references \itemize{
#' \item Kourentzes N. (2014). The Bias Coefficient: a new metric for forecast bias
#' \url{https://kourentzes.com/forecasting/2014/12/17/the-bias-coefficient-a-new-metric-for-forecast-bias/}
#' \item Svetunkov, I. (2017). Naughty APEs and the quest for the holy grail.
#' \url{https://forecasting.svetunkov.ru/en/2017/07/29/naughty-apes-and-the-quest-for-the-holy-grail/}
#' \item Fildes R. (1992). The evaluation of
#' extrapolative forecasting methods. International Journal of Forecasting, 8,
#' pp.81-98.
#' \item Hyndman R.J., Koehler A.B. (2006). Another look at measures of
#' forecast accuracy. International Journal of Forecasting, 22, pp.679-688.
#' \item Petropoulos F., Kourentzes N. (2015). Forecast combinations for
#' intermittent demand. Journal of the Operational Research Society, 66,
#' pp.914-924.
#' \item Wallstrom P., Segerstedt A. (2010). Evaluation of forecasting error
#' measurements and techniques for intermittent demand. International Journal
#' of Production Economics, 128, pp.625-636.
#' \item Davydenko, A., Fildes, R. (2013). Measuring Forecasting Accuracy:
#' The Case Of Judgmental Adjustments To Sku-Level Demand Forecasts.
#' International Journal of Forecasting, 29(3), 510-522.
#' \url{https://doi.org/10.1016/j.ijforecast.2012.09.002}
#' \item Gneiting, T., & Raftery, A. E. (2007). Strictly proper scoring rules,
#' prediction, and estimation. Journal of the American Statistical Association,
#' 102(477), 359–378. \url{https://doi.org/10.1198/016214506000001437}
#' }
#'
#' @seealso \link[greybox]{pinball}, \link[greybox]{hm}, \link[greybox]{measures}
#'
#' @examples
#'
#'
#' y <- rnorm(100,10,2)
#' testForecast <- rep(mean(y[1:90]),10)
#'
#' MAE(y[91:100],testForecast)
#' MSE(y[91:100],testForecast)
#'
#' MPE(y[91:100],testForecast)
#' MAPE(y[91:100],testForecast)
#'
#' # Measures from Petropoulos & Kourentzes (2015)
#' MASE(y[91:100],testForecast,mean(abs(y[1:90])))
#' sMSE(y[91:100],testForecast,mean(abs(y[1:90]))^2)
#' sPIS(y[91:100],testForecast,mean(abs(y[1:90])))
#' sCE(y[91:100],testForecast,mean(abs(y[1:90])))
#'
#' # Original MASE from Hyndman & Koehler (2006)
#' MASE(y[91:100],testForecast,mean(abs(diff(y[1:90]))))
#'
#' testForecast2 <- rep(y[91],10)
#' # Relative measures, from and inspired by Davydenko & Fildes (2013)
#' rMAE(y[91:100],testForecast2,testForecast)
#' rRMSE(y[91:100],testForecast2,testForecast)
#' rAME(y[91:100],testForecast2,testForecast)
#'
#' #### Measures for the prediction intervals
#' # An example with mtcars data
#' ourModel <- alm(mpg~., mtcars[1:30,], distribution="dnorm")
#' ourBenchmark <- alm(mpg~1, mtcars[1:30,], distribution="dnorm")
#'
#' # Produce predictions with the interval
#' ourForecast <- predict(ourModel, mtcars[-c(1:30),], interval="p")
#' ourBenchmarkForecast <- predict(ourBenchmark, mtcars[-c(1:30),], interval="p")
#'
#' MIS(mtcars$mpg[-c(1:30)],ourForecast$lower,ourForecast$upper,0.95)
#' sMIS(mtcars$mpg[-c(1:30)],ourForecast$lower,ourForecast$upper,mean(mtcars$mpg[1:30]),0.95)
#' rMIS(mtcars$mpg[-c(1:30)],ourForecast$lower,ourForecast$upper,
#' ourBenchmarkForecast$lower,ourBenchmarkForecast$upper,0.95)
#'
#' ### Also, see pinball function for other measures for the intervals
#'
#' @rdname error-measures
#' @export MAE
#' @aliases MAE
MAE <- function(actual,forecast){
    # Mean Absolute Error: average of |actual - forecast|, with NAs removed.
    # actual - vector of actual values; forecast - vector of forecasts.
    if(length(actual) != length(forecast)){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        stop("Cannot proceed.",call.=FALSE);
    }
    absoluteErrors <- abs(actual-forecast);
    return(mean(absoluteErrors,na.rm=TRUE));
}
#' @rdname error-measures
#' @export MSE
#' @aliases MSE
MSE <- function(actual,forecast){
    # Mean Squared Error: average of squared deviations, with NAs removed.
    # actual - vector of actual values; forecast - vector of forecasts.
    if(length(actual) != length(forecast)){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        stop("Cannot proceed.",call.=FALSE);
    }
    squaredErrors <- (actual-forecast)^2;
    return(mean(squaredErrors,na.rm=TRUE));
}
#' @rdname error-measures
#' @export MRE
#' @aliases MRE
MRE <- function(actual,forecast){
    # Mean Root Error (Kourentzes, 2014): the mean of the complex square roots
    # of the errors. The returned value is a complex number.
    # actual - vector of actual values; forecast - vector of forecasts.
    if(length(actual) != length(forecast)){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        stop("Cannot proceed.",call.=FALSE);
    }
    complexRoots <- sqrt(as.complex(actual-forecast));
    return(mean(complexRoots,na.rm=TRUE));
}
#' @rdname error-measures
#' @export MIS
#' @aliases MIS
MIS <- function(actual,lower,upper,level=0.95){
    # Mean Interval Score (Gneiting & Raftery, 2007): the average interval width
    # plus penalties for observations that fall outside the interval.
    # actual - actual values; lower/upper - interval bounds;
    # level - confidence level (a proportion; values > 1 are treated as percentages).
    if(level>1){
        level[] <- level / 100;
    }
    alpha <- 1-level;
    lengthsVector <- c(length(actual),length(upper),length(lower))
    if(any(lengthsVector>min(lengthsVector))){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of lower: ",length(lower)));
        message(paste0("Length of upper: ",length(upper)));
        stop("Cannot proceed.",call.=FALSE);
    }
    intervalWidth <- sum(upper-lower);
    belowPenalty <- sum((lower-actual)*(actual<lower));
    abovePenalty <- sum((actual-upper)*(actual>upper));
    return((intervalWidth + 2/alpha*(belowPenalty + abovePenalty)) / length(actual));
}
#' @rdname error-measures
#' @export MPE
#' @aliases MPE
MPE <- function(actual,forecast){
    # Mean Percentage Error: average of (actual - forecast) / actual.
    # actual - vector of actual values; forecast - vector of forecasts.
    if(length(actual) != length(forecast)){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        stop("Cannot proceed.",call.=FALSE);
    }
    percentageErrors <- (actual-forecast)/actual;
    return(mean(percentageErrors,na.rm=TRUE));
}
#' @rdname error-measures
#' @export MAPE
#' @aliases MAPE
MAPE <- function(actual,forecast){
    # Mean Absolute Percentage Error: average of |actual - forecast| / |actual|.
    # actual - vector of actual values; forecast - vector of forecasts.
    if(length(actual) != length(forecast)){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        stop("Cannot proceed.",call.=FALSE);
    }
    absolutePercentageErrors <- abs((actual-forecast)/actual);
    return(mean(absolutePercentageErrors,na.rm=TRUE));
}
#' @rdname error-measures
#' @export MASE
#' @aliases MASE
MASE <- function(actual,forecast,scale){
    # Mean Absolute Scaled Error (Hyndman & Koehler, 2006): MAE divided by scale.
    # actual - actual values; forecast - forecasts;
    # scale - denominator, typically the in-sample MAE of the naive method.
    if(length(actual) != length(forecast)){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        stop("Cannot proceed.",call.=FALSE);
    }
    absoluteErrors <- abs(actual-forecast);
    return(mean(absoluteErrors,na.rm=TRUE)/scale);
}
#' @rdname error-measures
#' @export rMAE
#' @aliases rMAE
rMAE <-function(actual,forecast,benchmark){
    # Relative MAE (Davydenko & Fildes, 2013): MAE of the forecast divided by
    # the MAE of the benchmark forecast. Returns 1 when the two forecasts
    # coincide, avoiding a 0/0 division.
    if((length(actual) != length(forecast)) | (length(actual) != length(benchmark)) | (length(benchmark) != length(forecast))){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        message(paste0("Length of benchmark: ",length(benchmark)));
        stop("Cannot proceed.",call.=FALSE);
    }
    if(all(forecast==benchmark)){
        return(1);
    }
    forecastMAE <- mean(abs(actual-forecast),na.rm=TRUE);
    benchmarkMAE <- mean(abs(actual-benchmark),na.rm=TRUE);
    return(forecastMAE/benchmarkMAE);
}
#' @rdname error-measures
#' @export rRMSE
#' @aliases rRMSE
rRMSE <-function(actual,forecast,benchmark){
    # Relative RMSE: the square root of the ratio of the forecast MSE to the
    # benchmark MSE. Returns 1 when the two forecasts coincide.
    if((length(actual) != length(forecast)) | (length(actual) != length(benchmark)) | (length(benchmark) != length(forecast))){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        message(paste0("Length of benchmark: ",length(benchmark)));
        stop("Cannot proceed.",call.=FALSE);
    }
    if(all(forecast==benchmark)){
        return(1);
    }
    forecastMSE <- mean((actual-forecast)^2,na.rm=TRUE);
    benchmarkMSE <- mean((actual-benchmark)^2,na.rm=TRUE);
    return(sqrt(forecastMSE/benchmarkMSE));
}
#' @rdname error-measures
#' @export rAME
#' @aliases rAME
rAME <-function(actual,forecast,benchmark){
    # Relative Absolute Mean Error: |mean error| of the forecast divided by the
    # |mean error| of the benchmark. Returns 1 when the two forecasts coincide.
    if((length(actual) != length(forecast)) | (length(actual) != length(benchmark)) | (length(benchmark) != length(forecast))){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        message(paste0("Length of benchmark: ",length(benchmark)));
        stop("Cannot proceed.",call.=FALSE);
    }
    if(all(forecast==benchmark)){
        return(1);
    }
    forecastAME <- abs(mean((actual-forecast),na.rm=TRUE));
    benchmarkAME <- abs(mean((actual-benchmark),na.rm=TRUE));
    return(forecastAME/benchmarkAME);
}
#' @rdname error-measures
#' @export rMIS
#' @aliases rMIS
rMIS <-function(actual,lower,upper,benchmarkLower,benchmarkUpper,level=0.95){
    # Relative Mean Interval Score: the MIS of the provided interval divided by
    # the MIS of the benchmark interval, both at the same confidence level.
    # actual - actual values; lower/upper - interval bounds;
    # benchmarkLower/benchmarkUpper - interval bounds of the benchmark method.
    allLengths <- c(length(actual),length(upper),length(lower),length(benchmarkLower),length(benchmarkUpper));
    if(any(allLengths>min(allLengths))){
        message("The length of the provided data differs.");
        stop("Cannot proceed.",call.=FALSE);
    }
    misModel <- MIS(actual=actual,lower=lower,upper=upper,level=level);
    misBenchmark <- MIS(actual=actual,lower=benchmarkLower,upper=benchmarkUpper,level=level);
    return(misModel / misBenchmark);
}
#' @rdname error-measures
#' @export RelMAE
#' @aliases rMAE
RelMAE <- function(actual,forecast,benchmark){
    # Deprecated alias for rMAE(), kept for backward compatibility.
    # Fixed the misspelling "depricated" in the user-facing warning.
    warning("This function is deprecated. Please, use rMAE instead");
    return(rMAE(actual,forecast,benchmark));
}
#' @rdname error-measures
#' @export RelRMSE
#' @aliases rRMSE
RelRMSE <- function(actual,forecast,benchmark){
    # Deprecated alias for rRMSE(), kept for backward compatibility.
    # Fixed the misspelling "depricated" in the user-facing warning.
    warning("This function is deprecated. Please, use rRMSE instead");
    return(rRMSE(actual,forecast,benchmark));
}
#' @rdname error-measures
#' @export RelAME
#' @aliases rAME
RelAME <- function(actual,forecast,benchmark){
    # Deprecated alias for rAME(), kept for backward compatibility.
    # Fixed the misspelling "depricated" in the user-facing warning.
    warning("This function is deprecated. Please, use rAME instead");
    return(rAME(actual,forecast,benchmark));
}
#' @rdname error-measures
#' @export RelMIS
#' @aliases rMIS
RelMIS <- function(actual,lower,upper,benchmarkLower,benchmarkUpper,level=0.95){
    # Deprecated alias for rMIS(), kept for backward compatibility.
    # Fixed the misspelling "depricated" in the user-facing warning.
    warning("This function is deprecated. Please, use rMIS instead");
    return(rMIS(actual,lower,upper,benchmarkLower,benchmarkUpper,level));
}
#' @rdname error-measures
#' @export sMSE
#' @aliases sMSE
sMSE <- function(actual,forecast,scale){
    # Scaled Mean Squared Error (Petropoulos & Kourentzes, 2015): MSE / scale.
    # Attention! scale must be provided as a squared quantity (e.g. the squared
    # mean of the in-sample actuals).
    if(length(actual) != length(forecast)){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        stop("Cannot proceed.",call.=FALSE);
    }
    squaredErrors <- (actual-forecast)^2;
    return(mean(squaredErrors,na.rm=TRUE)/scale);
}
#' @rdname error-measures
#' @export sPIS
#' @aliases sPIS
sPIS <- function(actual,forecast,scale){
    # Scaled Periods-In-Stock (Wallstrom & Segerstedt, 2010): the sum of the
    # cumulative forecast errors, divided by the provided scale.
    if(length(actual) != length(forecast)){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        stop("Cannot proceed.",call.=FALSE);
    }
    cumulativeErrors <- cumsum(forecast-actual);
    return(sum(cumulativeErrors)/scale);
}
#' @rdname error-measures
#' @export sCE
#' @aliases sCE
sCE <- function(actual,forecast,scale){
    # Scaled Cumulative Error: the total forecast error divided by the scale.
    if(length(actual) != length(forecast)){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        stop("Cannot proceed.",call.=FALSE);
    }
    totalError <- sum(forecast-actual);
    return(totalError/scale);
}
#' @rdname error-measures
#' @export sMIS
#' @aliases sMIS
sMIS <- function(actual,lower,upper,scale,level=0.95){
    # Scaled Mean Interval Score: MIS divided by the provided scale.
    # actual - actual values; lower/upper - interval bounds;
    # scale - denominator used for scaling; level - confidence level.
    lengthsVector <- c(length(actual),length(upper),length(lower))
    if(any(lengthsVector>min(lengthsVector))){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of lower: ",length(lower)));
        message(paste0("Length of upper: ",length(upper)));
        stop("Cannot proceed.",call.=FALSE);
    }
    misValue <- MIS(actual=actual,lower=lower,upper=upper,level=level);
    return(misValue/scale);
}
#' Error measures for the provided forecasts
#'
#' Function calculates several error measures using the provided
#' forecasts and the data for the holdout sample.
#'
#' @template author
#'
#' @aliases measures
#' @param holdout The vector of the holdout values.
#' @param forecast The vector of forecasts produced by a model.
#' @param actual The vector of actual in-sample values.
#' @param digits Number of digits of the output. If \code{NULL}
#' then no rounding is done.
#' @param benchmark The character variable, defining what to use as
#' benchmark for relative measures. Can be either \code{"naive"} or
#' \code{"mean"} (arithmetic mean of the whole series). The latter
#' can be useful when dealing with intermittent data.
#' @return The functions returns the named vector of errors:
#' \itemize{
#' \item MAE,
#' \item MSE
#' \item MPE,
#' \item MAPE,
#' \item MASE,
#' \item sMAE,
#' \item sMSE,
#' \item sCE,
#' \item rMAE,
#' \item rRMSE,
#' \item rAME,
#' \item cbias,
#' \item sPIS.
#' }
#' For the details on these errors, see \link[greybox]{Errors}.
#' @references \itemize{
#' \item Svetunkov, I. (2017). Naughty APEs and the quest for the holy grail.
#' \url{https://forecasting.svetunkov.ru/en/2017/07/29/naughty-apes-and-the-quest-for-the-holy-grail/}
#' \item Fildes R. (1992). The evaluation of
#' extrapolative forecasting methods. International Journal of Forecasting, 8,
#' pp.81-98.
#' \item Hyndman R.J., Koehler A.B. (2006). Another look at measures of
#' forecast accuracy. International Journal of Forecasting, 22, pp.679-688.
#' \item Petropoulos F., Kourentzes N. (2015). Forecast combinations for
#' intermittent demand. Journal of the Operational Research Society, 66,
#' pp.914-924.
#' \item Wallstrom P., Segerstedt A. (2010). Evaluation of forecasting error
#' measurements and techniques for intermittent demand. International Journal
#' of Production Economics, 128, pp.625-636.
#' \item Davydenko, A., Fildes, R. (2013). Measuring Forecasting Accuracy:
#' The Case Of Judgmental Adjustments To Sku-Level Demand Forecasts.
#' International Journal of Forecasting, 29(3), 510-522.
#' \url{https://doi.org/10.1016/j.ijforecast.2012.09.002}
#' }
#' @examples
#'
#'
#' y <- rnorm(100,10,2)
#' ourForecast <- rep(mean(y[1:90]),10)
#'
#' measures(y[91:100],ourForecast,y[1:90],digits=5)
#'
#' @export measures
measures <- function(holdout, forecast, actual, digits=NULL, benchmark=c("naive","mean")){
    # Calculates a collection of error measures for the forecast against the
    # holdout sample, using in-sample actuals for scaling and the selected
    # benchmark ("naive" = last in-sample value, "mean" = in-sample mean)
    # for the relative measures.
    holdout <- as.vector(holdout);
    forecast <- as.vector(forecast);
    actual <- as.vector(actual);
    h <- length(holdout);
    benchmark <- match.arg(benchmark,c("naive","mean"));
    benchmarkForecast <- switch(benchmark,
                                "naive"=rep(actual[length(actual)],h),
                                "mean"=rep(mean(actual),h));
    # Scale based on the non-zero in-sample actuals, used by the scaled measures
    nonZeroScale <- mean(abs(actual[actual!=0]));
    errormeasures <- c(MAE(holdout,forecast),
                       MSE(holdout,forecast),
                       MPE(holdout,forecast),
                       MAPE(holdout,forecast),
                       MASE(holdout,forecast,mean(abs(diff(actual)))),
                       MASE(holdout,forecast,mean(abs(actual))),
                       sMSE(holdout,forecast,nonZeroScale^2),
                       sCE(holdout,forecast,nonZeroScale),
                       rMAE(holdout,forecast,benchmarkForecast),
                       rRMSE(holdout,forecast,benchmarkForecast),
                       rAME(holdout,forecast,benchmarkForecast),
                       cbias(holdout-forecast,0),
                       sPIS(holdout,forecast,nonZeroScale));
    names(errormeasures) <- c("MAE","MSE",
                              "MPE","MAPE",
                              "MASE","sMAE","sMSE","sCE",
                              "rMAE","rRMSE","rAME","cbias","sPIS");
    if(!is.null(digits)){
        errormeasures[] <- round(errormeasures, digits);
    }
    return(errormeasures);
}
#' Half moment of a distribution and its derivatives.
#'
#' \code{hm} function estimates half moment from some predefined constant
#' \code{C}. \code{ham} estimates half absolute moment. Finally, \code{cbias}
#' function returns bias based on \code{hm}.
#'
#' \code{NA} values of \code{x} are excluded on the first step of calculation.
#'
#' @template author
#'
#' @aliases hm
#' @param x A variable based on which HM is estimated.
#' @param C Centering parameter.
#' @param ... Other parameters passed to mean function.
#' @return A complex variable is returned for \code{hm} function and real values
#' are returned for \code{cbias} and \code{ham}.
#' @examples
#'
#' x <- rnorm(100,0,1)
#' hm(x)
#' ham(x)
#' cbias(x)
#'
#' @export hm
#' @rdname hm
hm <- function(x,C=mean(x),...){
    # Half moment: the mean complex square root of the deviations of x from the
    # centre C, NAs excluded before centring. The result is a complex number.
    deviations <- x[!is.na(x)] - C;
    return(mean(sqrt(as.complex(deviations)),...));
}
#' @rdname hm
#' @export ham
#' @aliases ham
ham <- function(x,C=mean(x),...){
    # Half absolute moment: the mean square root of the absolute deviations of
    # x from the centre C, NAs excluded before centring. The result is real.
    absoluteDeviations <- abs(x[!is.na(x)] - C);
    return(mean(sqrt(absoluteDeviations),...));
}
#' @rdname hm
#' @export cbias
#' @aliases cbias
cbias <- function(x,C=mean(x),...){
    # Bias coefficient based on the argument (angle) of the half moment hm():
    # 1 minus the angle scaled by pi/4.
    halfMoment <- hm(x,C,...);
    return(1 - Arg(halfMoment)/(pi/4));
}
#' Pinball function
#'
#' The function returns the value from the pinball function for the specified level and
#' the type of loss
#'
#' @template author
#'
#' @param holdout The vector or matrix of the holdout values.
#' @param forecast The forecast of prediction interval (should be the same length as the
#' holdout).
#' @param level The level of the prediction interval associated with the forecast.
#' @param loss The type of loss to use. The number which corresponds to L1, L2 etc.
#' @return The function returns the scalar value.
#' @examples
#' # An example with mtcars data
#' ourModel <- alm(mpg~., mtcars[1:30,], distribution="dnorm")
#'
#' # Produce predictions with the interval
#' ourForecast <- predict(ourModel, mtcars[-c(1:30),], interval="p")
#'
#' # Pinball with the L1 (quantile value)
#' pinball(mtcars$mpg[-c(1:30)],ourForecast$upper,level=0.975,loss=1)
#' pinball(mtcars$mpg[-c(1:30)],ourForecast$lower,level=0.025,loss=1)
#'
#' # Pinball with the L2 (expectile value)
#' pinball(mtcars$mpg[-c(1:30)],ourForecast$upper,level=0.975,loss=2)
#' pinball(mtcars$mpg[-c(1:30)],ourForecast$lower,level=0.025,loss=2)
#'
#' @export pinball
pinball <- function(holdout, forecast, level, loss=1){
    # Pinball loss for one bound of a prediction interval: errors below the
    # bound are weighted by (1-level), errors above it by level. loss=1
    # gives the quantile (L1) score, loss=2 the expectile (L2) score.
    # holdout - holdout values; forecast - the interval bound of the same length.
    if(length(holdout) != length(forecast)){
        message("The length of the provided data differs.");
        message(paste0("Length of holdout: ",length(holdout)));
        message(paste0("Length of forecast: ",length(forecast)));
        stop("Cannot proceed.",call.=FALSE);
    }
    absErrors <- abs(holdout-forecast)^loss;
    below <- holdout<=forecast;
    (1-level)*sum(absErrors*below) + level*sum(absErrors*!below);
}
| /R/error-measures.R | no_license | rohitpandey13/greybox | R | false | false | 24,632 | r | #' Error measures
#'
#' Functions allow to calculate different types of errors for point and
#' interval predictions:
#' \enumerate{
#' \item MAE - Mean Absolute Error,
#' \item MSE - Mean Squared Error,
#' \item MRE - Mean Root Error (Kourentzes, 2014),
#' \item MIS - Mean Interval Score (Gneiting & Raftery, 2007),
#' \item MPE - Mean Percentage Error,
#' \item MAPE - Mean Absolute Percentage Error (See Svetunkov, 2017 for
#' the critique),
#' \item MASE - Mean Absolute Scaled Error (Hyndman & Koehler, 2006),
#' \item rMAE - Relative Mean Absolute Error (Davydenko & Fildes, 2013),
#' \item rRMSE - Relative Root Mean Squared Error,
#' \item rAME - Relative Absolute Mean Error,
#' \item rMIS - Relative Mean Interval Score,
#' \item sMSE - Scaled Mean Squared Error (Petropoulos & Kourentzes, 2015),
#' \item sPIS - Scaled Periods-In-Stock (Wallstrom & Segerstedt, 2010),
#' \item sCE - Scaled Cumulative Error,
#' \item sMIS - Scaled Mean Interval Score.
#' }
#'
#' In case of \code{sMSE}, \code{scale} needs to be a squared value. Typical
#' one -- squared mean value of in-sample actuals.
#'
#' If all the measures are needed, then \link[greybox]{measures} function
#' can help.
#'
#' There are several other measures, see details of \link[greybox]{pinball}
#' and \link[greybox]{hm}.
#'
#' @template author
#'
#' @aliases Errors
#' @param actual The vector or matrix of actual values.
#' @param forecast The vector or matrix of forecasts values.
#' @param lower The lower bound of the prediction interval.
#' @param upper The upper bound of the prediction interval.
#' @param scale The value that should be used in the denominator of MASE. Can
#' be anything but advised values are: mean absolute deviation of in-sample one
#' step ahead Naive error or mean absolute value of the in-sample actuals.
#' @param benchmark The vector or matrix of the forecasts of the benchmark
#' model.
#' @param benchmarkLower The lower bound of the prediction interval of the
#' benchmark model.
#' @param benchmarkUpper The upper bound of the prediction interval of the
#' benchmark model.
#' @param level The confidence level of the constructed interval.
#' @return All the functions return the scalar value.
#' @references \itemize{
#' \item Kourentzes N. (2014). The Bias Coefficient: a new metric for forecast bias
#' \url{https://kourentzes.com/forecasting/2014/12/17/the-bias-coefficient-a-new-metric-for-forecast-bias/}
#' \item Svetunkov, I. (2017). Naughty APEs and the quest for the holy grail.
#' \url{https://forecasting.svetunkov.ru/en/2017/07/29/naughty-apes-and-the-quest-for-the-holy-grail/}
#' \item Fildes R. (1992). The evaluation of
#' extrapolative forecasting methods. International Journal of Forecasting, 8,
#' pp.81-98.
#' \item Hyndman R.J., Koehler A.B. (2006). Another look at measures of
#' forecast accuracy. International Journal of Forecasting, 22, pp.679-688.
#' \item Petropoulos F., Kourentzes N. (2015). Forecast combinations for
#' intermittent demand. Journal of the Operational Research Society, 66,
#' pp.914-924.
#' \item Wallstrom P., Segerstedt A. (2010). Evaluation of forecasting error
#' measurements and techniques for intermittent demand. International Journal
#' of Production Economics, 128, pp.625-636.
#' \item Davydenko, A., Fildes, R. (2013). Measuring Forecasting Accuracy:
#' The Case Of Judgmental Adjustments To Sku-Level Demand Forecasts.
#' International Journal of Forecasting, 29(3), 510-522.
#' \url{https://doi.org/10.1016/j.ijforecast.2012.09.002}
#' \item Gneiting, T., & Raftery, A. E. (2007). Strictly proper scoring rules,
#' prediction, and estimation. Journal of the American Statistical Association,
#' 102(477), 359–378. \url{https://doi.org/10.1198/016214506000001437}
#' }
#'
#' @seealso \link[greybox]{pinball}, \link[greybox]{hm}, \link[greybox]{measures}
#'
#' @examples
#'
#'
#' y <- rnorm(100,10,2)
#' testForecast <- rep(mean(y[1:90]),10)
#'
#' MAE(y[91:100],testForecast)
#' MSE(y[91:100],testForecast)
#'
#' MPE(y[91:100],testForecast)
#' MAPE(y[91:100],testForecast)
#'
#' # Measures from Petropoulos & Kourentzes (2015)
#' MASE(y[91:100],testForecast,mean(abs(y[1:90])))
#' sMSE(y[91:100],testForecast,mean(abs(y[1:90]))^2)
#' sPIS(y[91:100],testForecast,mean(abs(y[1:90])))
#' sCE(y[91:100],testForecast,mean(abs(y[1:90])))
#'
#' # Original MASE from Hyndman & Koehler (2006)
#' MASE(y[91:100],testForecast,mean(abs(diff(y[1:90]))))
#'
#' testForecast2 <- rep(y[91],10)
#' # Relative measures, from and inspired by Davydenko & Fildes (2013)
#' rMAE(y[91:100],testForecast2,testForecast)
#' rRMSE(y[91:100],testForecast2,testForecast)
#' rAME(y[91:100],testForecast2,testForecast)
#'
#' #### Measures for the prediction intervals
#' # An example with mtcars data
#' ourModel <- alm(mpg~., mtcars[1:30,], distribution="dnorm")
#' ourBenchmark <- alm(mpg~1, mtcars[1:30,], distribution="dnorm")
#'
#' # Produce predictions with the interval
#' ourForecast <- predict(ourModel, mtcars[-c(1:30),], interval="p")
#' ourBenchmarkForecast <- predict(ourBenchmark, mtcars[-c(1:30),], interval="p")
#'
#' MIS(mtcars$mpg[-c(1:30)],ourForecast$lower,ourForecast$upper,0.95)
#' sMIS(mtcars$mpg[-c(1:30)],ourForecast$lower,ourForecast$upper,mean(mtcars$mpg[1:30]),0.95)
#' rMIS(mtcars$mpg[-c(1:30)],ourForecast$lower,ourForecast$upper,
#' ourBenchmarkForecast$lower,ourBenchmarkForecast$upper,0.95)
#'
#' ### Also, see pinball function for other measures for the intervals
#'
#' @rdname error-measures
#' @rdname error-measures
#' @export MAE
#' @aliases MAE
MAE <- function(actual,forecast){
    # Mean Absolute Error: average of |actual - forecast|, NA errors removed.
    # actual - vector of actual values,
    # forecast - vector of forecasts of the same length.
    if(length(actual) != length(forecast)){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        stop("Cannot proceed.",call.=FALSE);
    }
    mean(abs(actual-forecast),na.rm=TRUE);
}
#' @rdname error-measures
#' @export MSE
#' @aliases MSE
MSE <- function(actual,forecast){
    # Mean Squared Error: average of the squared forecast errors, NAs removed.
    # actual - vector of actual values,
    # forecast - vector of forecasts of the same length.
    if(length(actual) != length(forecast)){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        stop("Cannot proceed.",call.=FALSE);
    }
    errors <- actual-forecast;
    mean(errors^2,na.rm=TRUE);
}
#' @rdname error-measures
#' @export MRE
#' @aliases MRE
MRE <- function(actual,forecast){
    # Mean Root Error (Kourentzes, 2014): mean of the complex square roots
    # of the errors, so that positive and negative errors do not simply
    # cancel out. Returns a complex number.
    # actual - actual values,
    # forecast - forecasted values.
    if(length(actual) != length(forecast)){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        stop("Cannot proceed.",call.=FALSE);
    }
    else{
        return(mean(sqrt(as.complex(actual-forecast)),na.rm=TRUE));
    }
}
#' @rdname error-measures
#' @export MIS
#' @aliases MIS
MIS <- function(actual,lower,upper,level=0.95){
    # Mean Interval Score (Gneiting & Raftery, 2007): interval width plus a
    # 2/alpha penalty for every actual value falling outside the interval,
    # averaged over the holdout.
    # actual - actual values; lower / upper - interval bounds;
    # level - confidence level (values > 1 are treated as percentages).
    if(level>1){
        level[] <- level / 100;
    }
    alpha <- 1-level;
    allLengths <- c(length(actual),length(upper),length(lower))
    if(any(allLengths>min(allLengths))){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of lower: ",length(lower)));
        message(paste0("Length of upper: ",length(upper)));
        stop("Cannot proceed.",call.=FALSE);
    }
    # Penalise misses proportionally to how far outside the interval they fall
    penalty <- 2/alpha*(sum((lower-actual)*(actual<lower)) +
                            sum((actual-upper)*(actual>upper)));
    (sum(upper-lower) + penalty) / length(actual);
}
#' @rdname error-measures
#' @export MPE
#' @aliases MPE
MPE <- function(actual,forecast){
    # Mean Percentage Error: average of (actual - forecast) / actual,
    # NAs removed. Retains the sign, so it indicates bias direction.
    # actual - vector of actual values,
    # forecast - vector of forecasts of the same length.
    if(length(actual) != length(forecast)){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        stop("Cannot proceed.",call.=FALSE);
    }
    relativeErrors <- (actual-forecast)/actual;
    mean(relativeErrors,na.rm=TRUE);
}
#' @rdname error-measures
#' @export MAPE
#' @aliases MAPE
MAPE <- function(actual,forecast){
    # Mean Absolute Percentage Error: average of |(actual - forecast) / actual|,
    # NAs removed.
    # actual - vector of actual values,
    # forecast - vector of forecasts of the same length.
    if(length(actual) != length(forecast)){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        stop("Cannot proceed.",call.=FALSE);
    }
    relativeErrors <- (actual-forecast)/actual;
    mean(abs(relativeErrors),na.rm=TRUE);
}
#' @rdname error-measures
#' @export MASE
#' @aliases MASE
MASE <- function(actual,forecast,scale){
    # Mean Absolute Scaled Error (Hyndman & Koehler, 2006): out-of-sample
    # MAE divided by the provided scale (typically the in-sample MAE of
    # one-step-ahead Naive, or the mean of absolute in-sample actuals).
    # actual - actual values; forecast - forecasts of the same length;
    # scale - denominator used for scaling.
    if(length(actual) != length(forecast)){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        stop("Cannot proceed.",call.=FALSE);
    }
    mean(abs(actual-forecast),na.rm=TRUE)/scale;
}
#' @rdname error-measures
#' @export rMAE
#' @aliases rMAE
rMAE <- function(actual,forecast,benchmark){
    # Relative MAE (Davydenko & Fildes, 2013): MAE of the forecast divided
    # by the MAE of the benchmark forecast. Returns 1 by definition when the
    # two forecasts coincide (avoids 0/0).
    # actual - actual values; forecast - evaluated forecast;
    # benchmark - forecast of the benchmark (etalon) method.
    if((length(actual) != length(forecast)) | (length(actual) != length(benchmark)) | (length(benchmark) != length(forecast))){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        message(paste0("Length of benchmark: ",length(benchmark)));
        stop("Cannot proceed.",call.=FALSE);
    }
    if(all(forecast==benchmark)){
        return(1);
    }
    mean(abs(actual-forecast),na.rm=TRUE) /
        mean(abs(actual-benchmark),na.rm=TRUE);
}
#' @rdname error-measures
#' @export rRMSE
#' @aliases rRMSE
rRMSE <- function(actual,forecast,benchmark){
    # Relative RMSE: square root of the ratio of the forecast MSE to the
    # benchmark MSE. Returns 1 by definition when the forecasts coincide.
    # actual - actual values; forecast - evaluated forecast;
    # benchmark - forecast of the benchmark (etalon) method.
    if((length(actual) != length(forecast)) | (length(actual) != length(benchmark)) | (length(benchmark) != length(forecast))){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        message(paste0("Length of benchmark: ",length(benchmark)));
        stop("Cannot proceed.",call.=FALSE);
    }
    if(all(forecast==benchmark)){
        return(1);
    }
    mseRatio <- mean((actual-forecast)^2,na.rm=TRUE) /
        mean((actual-benchmark)^2,na.rm=TRUE);
    sqrt(mseRatio);
}
#' @rdname error-measures
#' @export rAME
#' @aliases rAME
rAME <- function(actual,forecast,benchmark){
    # Relative Absolute Mean Error: |mean error| of the forecast divided by
    # |mean error| of the benchmark. Returns 1 by definition when the
    # forecasts coincide.
    # actual - actual values; forecast - evaluated forecast;
    # benchmark - forecast of the benchmark (etalon) method.
    if((length(actual) != length(forecast)) | (length(actual) != length(benchmark)) | (length(benchmark) != length(forecast))){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        message(paste0("Length of benchmark: ",length(benchmark)));
        stop("Cannot proceed.",call.=FALSE);
    }
    if(all(forecast==benchmark)){
        return(1);
    }
    abs(mean((actual-forecast),na.rm=TRUE)) /
        abs(mean((actual-benchmark),na.rm=TRUE));
}
#' @rdname error-measures
#' @export rMIS
#' @aliases rMIS
rMIS <- function(actual,lower,upper,benchmarkLower,benchmarkUpper,level=0.95){
    # Relative MIS: the MIS of the evaluated interval divided by the MIS of
    # the benchmark interval, both computed at the same confidence level.
    # actual - actual values; lower / upper - evaluated interval bounds;
    # benchmarkLower / benchmarkUpper - benchmark interval bounds;
    # level - confidence level of the intervals.
    allLengths <- c(length(actual),length(upper),length(lower),length(benchmarkLower),length(benchmarkUpper));
    if(any(allLengths>min(allLengths))){
        message("The length of the provided data differs.");
        stop("Cannot proceed.",call.=FALSE);
    }
    MIS(actual=actual,lower=lower,upper=upper,level=level) /
        MIS(actual=actual,lower=benchmarkLower,upper=benchmarkUpper,level=level);
}
#' @rdname error-measures
#' @export RelMAE
#' @aliases rMAE
RelMAE <- function(actual,forecast,benchmark){
    # Deprecated alias kept for backward compatibility; use rMAE() instead.
    # Fix: the warning previously misspelled "deprecated" as "depricated".
    warning("This function is deprecated. Please, use rMAE instead");
    return(rMAE(actual,forecast,benchmark));
}
#' @rdname error-measures
#' @export RelRMSE
#' @aliases rRMSE
RelRMSE <- function(actual,forecast,benchmark){
    # Deprecated alias kept for backward compatibility; use rRMSE() instead.
    # Fix: the warning previously misspelled "deprecated" as "depricated".
    warning("This function is deprecated. Please, use rRMSE instead");
    return(rRMSE(actual,forecast,benchmark));
}
#' @rdname error-measures
#' @export RelAME
#' @aliases rAME
RelAME <- function(actual,forecast,benchmark){
    # Deprecated alias kept for backward compatibility; use rAME() instead.
    # Fix: the warning previously misspelled "deprecated" as "depricated".
    warning("This function is deprecated. Please, use rAME instead");
    return(rAME(actual,forecast,benchmark));
}
#' @rdname error-measures
#' @export RelMIS
#' @aliases rMIS
RelMIS <- function(actual,lower,upper,benchmarkLower,benchmarkUpper,level=0.95){
    # Deprecated alias kept for backward compatibility; use rMIS() instead.
    # Fix: the warning previously misspelled "deprecated" as "depricated".
    warning("This function is deprecated. Please, use rMIS instead");
    return(rMIS(actual,lower,upper,benchmarkLower,benchmarkUpper,level));
}
#' @rdname error-measures
#' @export sMSE
#' @aliases sMSE
sMSE <- function(actual,forecast,scale){
    # Scaled Mean Squared Error (Petropoulos & Kourentzes, 2015): MSE
    # divided by scale. NOTE: scale must be provided as a squared quantity
    # (e.g. squared mean of in-sample actuals) for the units to match.
    # actual - actual values; forecast - forecasts of the same length;
    # scale - squared denominator used for scaling.
    if(length(actual) != length(forecast)){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        stop("Cannot proceed.",call.=FALSE);
    }
    mean((actual-forecast)^2,na.rm=TRUE)/scale;
}
#' @rdname error-measures
#' @export sPIS
#' @aliases sPIS
sPIS <- function(actual,forecast,scale){
    # Scaled Periods-In-Stock (Wallstrom & Segerstedt, 2010): sum of the
    # cumulative errors (forecast - actual) over the horizon, divided by scale.
    # actual - actual values; forecast - forecasts of the same length;
    # scale - denominator used for scaling.
    if(length(actual) != length(forecast)){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        stop("Cannot proceed.",call.=FALSE);
    }
    sum(cumsum(forecast-actual))/scale;
}
#' @rdname error-measures
#' @export sCE
#' @aliases sCE
sCE <- function(actual,forecast,scale){
    # Scaled Cumulative Error: sum of the errors (forecast - actual) over
    # the horizon, divided by scale. Retains the sign, so it indicates bias.
    # actual - actual values; forecast - forecasts of the same length;
    # scale - denominator used for scaling.
    if(length(actual) != length(forecast)){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of forecast: ",length(forecast)));
        stop("Cannot proceed.",call.=FALSE);
    }
    sum(forecast-actual)/scale;
}
#' @rdname error-measures
#' @export sMIS
#' @aliases sMIS
sMIS <- function(actual,lower,upper,scale,level=0.95){
    # Scaled Mean Interval Score: MIS of the interval divided by scale.
    # actual - actual values; lower / upper - interval bounds;
    # scale - denominator used for scaling;
    # level - confidence level of the interval.
    allLengths <- c(length(actual),length(upper),length(lower))
    if(any(allLengths>min(allLengths))){
        message("The length of the provided data differs.");
        message(paste0("Length of actual: ",length(actual)));
        message(paste0("Length of lower: ",length(lower)));
        message(paste0("Length of upper: ",length(upper)));
        stop("Cannot proceed.",call.=FALSE);
    }
    MIS(actual=actual,lower=lower,upper=upper,level=level)/scale;
}
#' Error measures for the provided forecasts
#'
#' Function calculates several error measures using the provided
#' forecasts and the data for the holdout sample.
#'
#' @template author
#'
#' @aliases measures
#' @param holdout The vector of the holdout values.
#' @param forecast The vector of forecasts produced by a model.
#' @param actual The vector of actual in-sample values.
#' @param digits Number of digits of the output. If \code{NULL}
#' then no rounding is done.
#' @param benchmark The character variable, defining what to use as
#' benchmark for relative measures. Can be either \code{"naive"} or
#' \code{"mean"} (arithmetic mean of the whole series). The latter
#' can be useful when dealing with intermittent data.
#' @return The functions returns the named vector of errors:
#' \itemize{
#' \item MAE,
#' \item MSE
#' \item MPE,
#' \item MAPE,
#' \item MASE,
#' \item sMAE,
#' \item sMSE,
#' \item sCE,
#' \item rMAE,
#' \item rRMSE,
#' \item rAME,
#' \item cbias,
#' \item sPIS.
#' }
#' For the details on these errors, see \link[greybox]{Errors}.
#' @references \itemize{
#' \item Svetunkov, I. (2017). Naughty APEs and the quest for the holy grail.
#' \url{https://forecasting.svetunkov.ru/en/2017/07/29/naughty-apes-and-the-quest-for-the-holy-grail/}
#' \item Fildes R. (1992). The evaluation of
#' extrapolative forecasting methods. International Journal of Forecasting, 8,
#' pp.81-98.
#' \item Hyndman R.J., Koehler A.B. (2006). Another look at measures of
#' forecast accuracy. International Journal of Forecasting, 22, pp.679-688.
#' \item Petropoulos F., Kourentzes N. (2015). Forecast combinations for
#' intermittent demand. Journal of the Operational Research Society, 66,
#' pp.914-924.
#' \item Wallstrom P., Segerstedt A. (2010). Evaluation of forecasting error
#' measurements and techniques for intermittent demand. International Journal
#' of Production Economics, 128, pp.625-636.
#' \item Davydenko, A., Fildes, R. (2013). Measuring Forecasting Accuracy:
#' The Case Of Judgmental Adjustments To Sku-Level Demand Forecasts.
#' International Journal of Forecasting, 29(3), 510-522.
#' \url{https://doi.org/10.1016/j.ijforecast.2012.09.002}
#' }
#' @examples
#'
#'
#' y <- rnorm(100,10,2)
#' ourForecast <- rep(mean(y[1:90]),10)
#'
#' measures(y[91:100],ourForecast,y[1:90],digits=5)
#'
#' @export measures
measures <- function(holdout, forecast, actual, digits=NULL, benchmark=c("naive","mean")){
    # Calculate the standard set of error measures for the holdout sample.
    # holdout - holdout actual values; forecast - the produced forecasts;
    # actual - in-sample actual values, used both for scaling and for
    # constructing the benchmark forecast; digits - optional rounding;
    # benchmark - "naive" (last in-sample value) or "mean" (in-sample
    # arithmetic mean) for the relative measures.
    holdout <- as.vector(holdout);
    forecast <- as.vector(forecast);
    actual <- as.vector(actual);
    h <- length(holdout);
    benchmark <- match.arg(benchmark,c("naive","mean"));
    # Flat benchmark forecast used by rMAE / rRMSE / rAME
    benchmarkForecast <- switch(benchmark,
                                "naive"=rep(actual[length(actual)],h),
                                "mean"=rep(mean(actual),h));
    # Scale based on the non-zero in-sample actuals (robust to intermittent data)
    scaleValue <- mean(abs(actual[actual!=0]));
    errorMeasures <- c(MAE=MAE(holdout,forecast),
                       MSE=MSE(holdout,forecast),
                       MPE=MPE(holdout,forecast),
                       MAPE=MAPE(holdout,forecast),
                       MASE=MASE(holdout,forecast,mean(abs(diff(actual)))),
                       sMAE=MASE(holdout,forecast,mean(abs(actual))),
                       sMSE=sMSE(holdout,forecast,scaleValue^2),
                       sCE=sCE(holdout,forecast,scaleValue),
                       rMAE=rMAE(holdout,forecast,benchmarkForecast),
                       rRMSE=rRMSE(holdout,forecast,benchmarkForecast),
                       rAME=rAME(holdout,forecast,benchmarkForecast),
                       cbias=cbias(holdout-forecast,0),
                       sPIS=sPIS(holdout,forecast,scaleValue));
    if(!is.null(digits)){
        errorMeasures[] <- round(errorMeasures,digits);
    }
    return(errorMeasures);
}
#' Half moment of a distribution and its derivatives.
#'
#' \code{hm} function estimates half moment from some predefined constant
#' \code{C}. \code{ham} estimates half absolute moment. Finally, \code{cbias}
#' function returns bias based on \code{hm}.
#'
#' \code{NA} values of \code{x} are excluded on the first step of calculation.
#'
#' @template author
#'
#' @aliases hm
#' @param x A variable based on which HM is estimated.
#' @param C Centering parameter.
#' @param ... Other parameters passed to mean function.
#' @return A complex variable is returned for \code{hm} function and real values
#' are returned for \code{cbias} and \code{ham}.
#' @examples
#'
#' x <- rnorm(100,0,1)
#' hm(x)
#' ham(x)
#' cbias(x)
#'
#' @export hm
#' @rdname hm
hm <- function(x,C=mean(x),...){
    # Half moment: mean of sqrt(x - C) computed in the complex plane, so
    # negative deviations are kept as imaginary components instead of
    # producing NaN. NA values of x are dropped before centering.
    # NOTE(review): the default C=mean(x) is computed without na.rm, so it
    # is NA whenever x contains NA — pass C explicitly in that case.
    return(mean(sqrt(as.complex(x[!is.na(x)]-C)),...));
}
#' @rdname hm
#' @export ham
#' @aliases ham
ham <- function(x,C=mean(x),...){
    # Half absolute moment: mean of sqrt(|x - C|), a real-valued analogue
    # of hm(). NA values of x are dropped before centering.
    # NOTE(review): as with hm(), the default C=mean(x) is NA when x
    # contains NA values.
    return(mean(sqrt(abs(x[!is.na(x)]-C)),...));
}
#' @rdname hm
#' @export cbias
#' @aliases cbias
cbias <- function(x,C=mean(x),...){
    # Bias coefficient: rescales the argument (angle) of the complex half
    # moment hm(x,C) by pi/4, giving a real-valued measure of bias of x
    # around C. Relies on hm(), defined above in this file.
    return(1 - Arg(hm(x,C,...))/(pi/4));
}
#' Pinball function
#'
#' The function returns the value from the pinball function for the specified level and
#' the type of loss
#'
#' @template author
#'
#' @param holdout The vector or matrix of the holdout values.
#' @param forecast The forecast of prediction interval (should be the same length as the
#' holdout).
#' @param level The level of the prediction interval associated with the forecast.
#' @param loss The type of loss to use. The number which corresponds to L1, L2 etc.
#' @return The function returns the scalar value.
#' @examples
#' # An example with mtcars data
#' ourModel <- alm(mpg~., mtcars[1:30,], distribution="dnorm")
#'
#' # Produce predictions with the interval
#' ourForecast <- predict(ourModel, mtcars[-c(1:30),], interval="p")
#'
#' # Pinball with the L1 (quantile value)
#' pinball(mtcars$mpg[-c(1:30)],ourForecast$upper,level=0.975,loss=1)
#' pinball(mtcars$mpg[-c(1:30)],ourForecast$lower,level=0.025,loss=1)
#'
#' # Pinball with the L2 (expectile value)
#' pinball(mtcars$mpg[-c(1:30)],ourForecast$upper,level=0.975,loss=2)
#' pinball(mtcars$mpg[-c(1:30)],ourForecast$lower,level=0.025,loss=2)
#'
#' @export pinball
pinball <- function(holdout, forecast, level, loss=1){
    # Pinball loss for one bound of a prediction interval: errors below the
    # bound are weighted by (1-level), errors above it by level. loss=1
    # gives the quantile (L1) score, loss=2 the expectile (L2) score.
    # holdout - holdout values; forecast - the interval bound of the same length.
    if(length(holdout) != length(forecast)){
        message("The length of the provided data differs.");
        message(paste0("Length of holdout: ",length(holdout)));
        message(paste0("Length of forecast: ",length(forecast)));
        stop("Cannot proceed.",call.=FALSE);
    }
    absErrors <- abs(holdout-forecast)^loss;
    below <- holdout<=forecast;
    (1-level)*sum(absErrors*below) + level*sum(absErrors*!below);
}
|
getNOAA.bathy <-
# Fetch an ETOPO1 bathymetry grid for the box (lon1,lon2) x (lat1,lat2)
# from the NOAA WCS service at 'resolution' minutes per cell, or reload a
# local csv copy saved by a previous call with keep=TRUE.
# antimeridian=TRUE fetches the two halves around +/-180 and stitches them.
function(lon1,lon2,lat1,lat2, resolution = 4, keep=FALSE, antimeridian=FALSE){
	# Argument validation
	if (lon1 == lon2) stop("The longitudinal range defined by lon1 and lon2 is incorrect")
	if (lat1 == lat2) stop("The latitudinal range defined by lat1 and lat2 is incorrect")
	if (lat1 > 90 | lat1 < -90 | lat2 > 90 | lat2 < -90) stop("Latitudes should have values between -90 and +90")
	if (lon1 < -180 | lon1 > 180 | lon2 < -180 | lon2 > 180) stop("Longitudes should have values between -180 and +180")
	if (resolution < 1) stop("The resolution must be equal to or greater than 1")
	x1=x2=y1=y2 = NULL
	# Order the corners so that (x1,y1) is the lower-left of the bounding box
	if (lon1 < lon2) {lon1->x1 ; lon2->x2} else {lon1->x2 ; lon2->x1}
	if (lat1 < lat2) {lat1->y1 ; lat2->y2} else {lat1->y2 ; lat2->y1}
	# Cell size in degrees; 'resolution' is expressed in minutes (1/60 degree)
	res = resolution * 0.016666666666666667
	ncell.lon <- (x2-x1)*60/resolution
	ncell.lat <- (y2-y1)*60/resolution
	if (ncell.lon < 2 & ncell.lat < 2) stop("It's impossible to fetch an area with less than one cell. Either increase the longitudinal and longitudinal ranges or the resolution (i.e. use a smaller res value)")
	if (ncell.lon < 2) stop("It's impossible to fetch an area with less than one cell. Either increase the longitudinal range or the resolution (i.e. use a smaller res value)")
	if (ncell.lat < 2) stop("It's impossible to fetch an area with less than one cell. Either increase the latitudinal range or the resolution (i.e. use a smaller res value)")
	# Download one xyz grid from the NOAA WCS endpoint; returns a data.frame,
	# or an object of class "try-error" when the request fails.
	fetch <- function(x1,y1,x2,y2,res) {
		WEB.REQUEST <- paste("https://gis.ngdc.noaa.gov/cgi-bin/public/wcs/etopo1.xyz?filename=etopo1.xyz&request=getcoverage&version=1.0.0&service=wcs&coverage=etopo1&CRS=EPSG:4326&format=xyz&resx=", res, "&resy=", res, "&bbox=", x1, ",", y1, ",", x2, ",", y2, sep = "")
		dat <- suppressWarnings(try(read.table(WEB.REQUEST),silent=TRUE))
		return(dat)
	}
	# Naming the file (the "_anti" suffix distinguishes antimeridian grids)
	if (antimeridian) {
		FILE <- paste("marmap_coord_",x1,";",y1,";",x2,";",y2,"_res_",resolution,"_anti",".csv", sep="")
	} else {
		FILE <- paste("marmap_coord_",x1,";",y1,";",x2,";",y2,"_res_",resolution,".csv", sep="")
	}
	# If file exists in the working directory, load it,
	if(FILE %in% list.files() ) {
		cat("File already exists ; loading \'", FILE,"\'", sep="")
		read.bathy(FILE, header=T) -> exisiting.bathy
		return(exisiting.bathy)
	} else { # otherwise, fetch it on NOAA server
		if (antimeridian) {
			# Split the request at the antimeridian: [x2, 180] and [-180, x1]
			l1 <- x2 ; l2 <- 180 ; l3 <- -180 ; l4 <- x1
			cat("Querying NOAA database ...\n")
			cat("This may take seconds to minutes, depending on grid size\n")
			left <- fetch(l1,y1,l2,y2,res)
			right <- fetch(l3,y1,l4,y2,res)
			if (is(left,"try-error")|is(right,"try-error")) {
				stop("The NOAA server cannot be reached\n")
			} else {
				cat("Building bathy matrix ...\n")
				# Drop the duplicated boundary row, shift the western half by
				# +360 degrees and stitch the two halves together.
				left <- as.bathy(left) ; left <- left[-nrow(left),]
				right <- as.bathy(right)
				rownames(right) <- as.numeric(rownames(right)) + 360
				bath2 <- rbind(left,right)
				class(bath2) <- "bathy"
				bath <- as.xyz(bath2)
			}
		} else {
			cat("Querying NOAA database ...\n")
			cat("This may take seconds to minutes, depending on grid size\n")
			bath <- fetch(x1,y1,x2,y2,res)
			if (is(bath,"try-error")) {
				stop("The NOAA server cannot be reached\n")
			} else {
				cat("Building bathy matrix ...\n")
				bath2 <- as.bathy(bath)
			}
		}
		# Optionally cache the raw xyz grid for future calls
		if (keep) {
			write.table(bath, file=FILE, sep=",", quote=FALSE, row.names=FALSE)
		}
		return(bath2)
	}
} | /R/getNOAA.bathy.R | no_license | jdpye/marmap | R | false | false | 3,385 | r | getNOAA.bathy <-
function(lon1,lon2,lat1,lat2, resolution = 4, keep=FALSE, antimeridian=FALSE){
	# Fetch an ETOPO1 bathymetry grid for the box (lon1,lon2) x (lat1,lat2)
	# from the NOAA WCS service at 'resolution' minutes per cell, or reload a
	# local csv copy saved by a previous call with keep=TRUE.
	# antimeridian=TRUE fetches the two halves around +/-180 and stitches them.
	# Fixes: header=T -> header=TRUE, '=' -> '<-' for assignment, removed
	# right-assignment arrows and the 'exisiting.bathy' typo.
	if (lon1 == lon2) stop("The longitudinal range defined by lon1 and lon2 is incorrect")
	if (lat1 == lat2) stop("The latitudinal range defined by lat1 and lat2 is incorrect")
	if (lat1 > 90 | lat1 < -90 | lat2 > 90 | lat2 < -90) stop("Latitudes should have values between -90 and +90")
	if (lon1 < -180 | lon1 > 180 | lon2 < -180 | lon2 > 180) stop("Longitudes should have values between -180 and +180")
	if (resolution < 1) stop("The resolution must be equal to or greater than 1")
	# Order the corners so that (x1,y1) is the lower-left of the bounding box
	x1 <- min(lon1, lon2) ; x2 <- max(lon1, lon2)
	y1 <- min(lat1, lat2) ; y2 <- max(lat1, lat2)
	# Cell size in degrees; 'resolution' is expressed in minutes (1/60 degree)
	res <- resolution * 0.016666666666666667
	ncell.lon <- (x2-x1)*60/resolution
	ncell.lat <- (y2-y1)*60/resolution
	if (ncell.lon < 2 & ncell.lat < 2) stop("It's impossible to fetch an area with less than one cell. Either increase the longitudinal and longitudinal ranges or the resolution (i.e. use a smaller res value)")
	if (ncell.lon < 2) stop("It's impossible to fetch an area with less than one cell. Either increase the longitudinal range or the resolution (i.e. use a smaller res value)")
	if (ncell.lat < 2) stop("It's impossible to fetch an area with less than one cell. Either increase the latitudinal range or the resolution (i.e. use a smaller res value)")
	# Download one xyz grid from the NOAA WCS endpoint; returns a data.frame,
	# or an object of class "try-error" when the request fails.
	fetch <- function(x1,y1,x2,y2,res) {
		WEB.REQUEST <- paste("https://gis.ngdc.noaa.gov/cgi-bin/public/wcs/etopo1.xyz?filename=etopo1.xyz&request=getcoverage&version=1.0.0&service=wcs&coverage=etopo1&CRS=EPSG:4326&format=xyz&resx=", res, "&resy=", res, "&bbox=", x1, ",", y1, ",", x2, ",", y2, sep = "")
		suppressWarnings(try(read.table(WEB.REQUEST),silent=TRUE))
	}
	# Name of the local cache file ("_anti" distinguishes antimeridian grids)
	if (antimeridian) {
		FILE <- paste("marmap_coord_",x1,";",y1,";",x2,";",y2,"_res_",resolution,"_anti",".csv", sep="")
	} else {
		FILE <- paste("marmap_coord_",x1,";",y1,";",x2,";",y2,"_res_",resolution,".csv", sep="")
	}
	# If the file already exists in the working directory, load and return it
	if(FILE %in% list.files() ) {
		cat("File already exists ; loading \'", FILE,"\'", sep="")
		existing.bathy <- read.bathy(FILE, header=TRUE)
		return(existing.bathy)
	}
	# Otherwise fetch the grid from the NOAA server
	if (antimeridian) {
		# Split the request at the antimeridian: [x2, 180] and [-180, x1]
		l1 <- x2 ; l2 <- 180 ; l3 <- -180 ; l4 <- x1
		cat("Querying NOAA database ...\n")
		cat("This may take seconds to minutes, depending on grid size\n")
		left <- fetch(l1,y1,l2,y2,res)
		right <- fetch(l3,y1,l4,y2,res)
		if (is(left,"try-error")|is(right,"try-error")) {
			stop("The NOAA server cannot be reached\n")
		}
		cat("Building bathy matrix ...\n")
		# Drop the duplicated boundary row, shift the western half by
		# +360 degrees and stitch the two halves together.
		left <- as.bathy(left) ; left <- left[-nrow(left),]
		right <- as.bathy(right)
		rownames(right) <- as.numeric(rownames(right)) + 360
		bath2 <- rbind(left,right)
		class(bath2) <- "bathy"
		bath <- as.xyz(bath2)
	} else {
		cat("Querying NOAA database ...\n")
		cat("This may take seconds to minutes, depending on grid size\n")
		bath <- fetch(x1,y1,x2,y2,res)
		if (is(bath,"try-error")) {
			stop("The NOAA server cannot be reached\n")
		}
		cat("Building bathy matrix ...\n")
		bath2 <- as.bathy(bath)
	}
	# Optionally cache the raw xyz grid for future calls
	if (keep) {
		write.table(bath, file=FILE, sep=",", quote=FALSE, row.names=FALSE)
	}
	return(bath2)
}
## cachematrix.R
##
## Create a cache matrix object that can be used to
## repeatedly solve the inverse of the matrix, but only
## calculates the inverse once.
##
## Usage:
## M <- matrix(c(1, 2, 3, 4), nrow=2, ncol=2)
## cacheMatrix <- makeCacheMatrix(M)
## cacheSolve(cacheMatrix)
##
## cacheMatrix$set(M) # Change the matrix being cached.
## M <- cacheMatrix$get() # Returns the matrix being cached.
##
## cacheMatrix$setInverse(solve(data, ...)) # Private function containing cached inverse of x
## cacheMatrix$getInverse() # Private function used to get the cached inverse of x
## Create a cacheMatrix object for an invertible matrix.
makeCacheMatrix <- function(x = matrix()) {
    # Wrap a matrix together with a cache slot for its inverse.
    # Returns a list of accessors: set/get for the matrix itself and
    # setInverse/getInverse for the cached inverse (NULL until computed).
    inv <- NULL
    set <- function(y) {
        # Replacing the matrix invalidates any previously cached inverse
        x <<- y
        inv <<- NULL
    }
    get <- function() x
    setInverse <- function(inverse) inv <<- inverse
    getInverse <- function() inv
    list(set = set,
         get = get,
         setInverse = setInverse,
         getInverse = getInverse)
}
## Return the inverse of an cacheMatrix object
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
invFunc <- x$getInverse()
if(!is.null(invFunc)) {
message("getting cached data")
return(invFunc)
}
data <- x$get()
if (det(data) == 0)
return(NULL)
invFunc <- solve(data, ...)
x$setInverse(invFunc)
invFunc
} | /02_R_Programming/programming-assignment2/cacheMatrix.R | no_license | paviana/Coursera-Data_Science_Specialization | R | false | false | 1,452 | r | ## cachematrix.R
##
## Create a cache matrix object that can be used to
## repeatedly solve the inverse of the matrix, but only
## calculates the inverse once.
##
## Usage:
## M <- matrix(c(1, 2, 3, 4), nrow=2, ncol=2)
## cacheMatrix <- makeCacheMatrix(M)
## cacheSolve(cacheMatrix)
##
## cacheMatrix$set(M) # Change the matrix being cached.
## M <- cacheMatrix$get() # Returns the matrix being cached.
##
## cacheMatrix$setInverse(solve(data, ...)) # Private function containing cached inverse of x
## cacheMatrix$getInverse() # Private function used to get the cached inverse of x
## Create a cacheMatrix object for an invertible matrix.
## Build a cacheMatrix: a list of closures that share one environment
## holding the matrix `x` and its lazily computed, cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL
  # set():        replace the stored matrix and drop any stale cached inverse
  # get():        return the stored matrix
  # setInverse(): store a computed inverse in the cache
  # getInverse(): return the cached inverse (NULL until one is stored)
  list(
    set = function(y) {
      x <<- y
      inv_cache <<- NULL
    },
    get = function() x,
    setInverse = function(inverse) inv_cache <<- inverse,
    getInverse = function() inv_cache
  )
}
## Return the inverse of a cacheMatrix object created by makeCacheMatrix().
## The inverse is computed at most once and cached inside the object;
## subsequent calls reuse the cached value (announced via a message).
## Returns NULL, as before, when the matrix cannot be inverted.
cacheSolve <- function(x, ...) {
  ## x:   a cacheMatrix object (list of closures from makeCacheMatrix)
  ## ...: extra arguments forwarded to solve()
  cached <- x$getInverse()
  if (!is.null(cached)) {
    ## cache hit -- skip the computation entirely
    message("getting cached data")
    return(cached)
  }
  data <- x$get()
  ## Fix: the previous `det(data) == 0` test compared a float for exact
  ## equality -- det() can underflow to 0 for an invertible near-singular
  ## matrix (wrongly returning NULL), and never triggers for matrices that
  ## are only computationally singular. Let solve() decide instead and keep
  ## the NULL-on-singular contract.
  inv <- tryCatch(solve(data, ...), error = function(e) NULL)
  if (is.null(inv)) {
    return(NULL)
  }
  x$setInverse(inv)  # populate the cache for subsequent calls
  inv
}
#' Set font sizes of the currently active ggplot2 theme
#'
#' Reads the global ggplot2 theme with \code{theme_get}, overrides the font
#' size of each element whose argument was supplied, and installs the result
#' with \code{theme_set}. Arguments that are missing leave the corresponding
#' element unchanged.
#'
#' @param fs_title font size of the plot title.
#' @param fs_axis_title.x font size of the x-axis title.
#' @param fs_axis_title.y font size of the y-axis title.
#' @param fs_labels.x font size of the x-axis tick labels.
#' @param fs_labels.y font size of the y-axis tick labels.
#' @return The value returned by \code{ggplot2::theme_set}.
#' @importFrom ggplot2 theme_get theme theme_set element_text
#' @export
font_size <- function(fs_title, fs_axis_title.x, fs_axis_title.y, fs_labels.x, fs_labels.y) {
# get current theme
cur.theme <- ggplot2::theme_get()
# each size is applied only when the caller explicitly supplied it
if (!missing(fs_title)) {
cur.theme <- cur.theme +
ggplot2::theme(title = ggplot2::element_text(size = fs_title))
}
if (!missing(fs_axis_title.x)) {
cur.theme <- cur.theme +
ggplot2::theme(axis.title.x = ggplot2::element_text(size = fs_axis_title.x))
}
if (!missing(fs_axis_title.y)) {
cur.theme <- cur.theme +
ggplot2::theme(axis.title.y = ggplot2::element_text(size = fs_axis_title.y))
}
if (!missing(fs_labels.x)) {
cur.theme <- cur.theme +
ggplot2::theme(axis.text.x = ggplot2::element_text(size = fs_labels.x))
}
if (!missing(fs_labels.y)) {
cur.theme <- cur.theme +
ggplot2::theme(axis.text.y = ggplot2::element_text(size = fs_labels.y))
}
# install the updated theme globally (side effect)
ggplot2::theme_set(cur.theme)
}
| /R/font_size.R | no_license | stefanfritsch/sjPlot | R | false | false | 983 | r | #' @importFrom ggplot2 theme_get theme theme_set element_text
#' @export
# Update the globally active ggplot2 theme with the supplied font sizes.
# Only arguments the caller actually passed are applied (missing() check),
# so unspecified elements keep their current size.
font_size <- function(fs_title, fs_axis_title.x, fs_axis_title.y, fs_labels.x, fs_labels.y) {
  # start from the currently active theme and layer size tweaks on top
  th <- ggplot2::theme_get()
  if (!missing(fs_title)) {
    th <- th + ggplot2::theme(title = ggplot2::element_text(size = fs_title))
  }
  if (!missing(fs_axis_title.x)) {
    th <- th + ggplot2::theme(axis.title.x = ggplot2::element_text(size = fs_axis_title.x))
  }
  if (!missing(fs_axis_title.y)) {
    th <- th + ggplot2::theme(axis.title.y = ggplot2::element_text(size = fs_axis_title.y))
  }
  if (!missing(fs_labels.x)) {
    th <- th + ggplot2::theme(axis.text.x = ggplot2::element_text(size = fs_labels.x))
  }
  if (!missing(fs_labels.y)) {
    th <- th + ggplot2::theme(axis.text.y = ggplot2::element_text(size = fs_labels.y))
  }
  # install the updated theme globally (side effect)
  ggplot2::theme_set(th)
}
|
#######################################################################################
###############
############### AntMAN Package
###############
###############
#######################################################################################
#' S3 class AM_mcmc_output
#' @description Output type of return values from \code{\link{AM_mcmc_fit}}.
#' @seealso \code{\link{AM_mcmc_fit}}
#' @name AM_mcmc_output
#' @return \code{\link{AM_mcmc_output}}
NULL
#' S3 class AM_mcmc_configuration
#' @description Output type of return values from \code{\link{AM_mcmc_parameters}}.
#' @seealso \code{\link{AM_mcmc_fit}}
#' @name AM_mcmc_configuration
#' @return \code{\link{AM_mcmc_configuration}}
NULL
#################################################################################
##### AM_mcmc_configuration function
#################################################################################
#' summary information of the AM_mcmc_configuration object
#'
#'
#'
#' Given an \code{\link{AM_mcmc_configuration}} object, this function prints the summary information
#' of the specified mcmc configuration.
#'
#'@param object an \code{\link{AM_mcmc_configuration}} object.
#'@param ... all additional parameters are ignored
#'
#'
#'@method summary AM_mcmc_configuration
#'@seealso \code{\link{AM_mcmc_parameters}}
#'@return NULL. Called for side effects.
#'@export
# Print a short listing of an AM_mcmc_configuration object: one line per
# configuration field, showing (up to) the first few values of each entry.
summary.AM_mcmc_configuration <- function(object, ...) {
  cat("\n", "AM_mcmc_configuration\n", sep = "")
  for (field in names(object)) {
    cat(" -", field, ": ", head(unlist(object[[field]], use.names = FALSE)), "\n")
  }
}
#################################################################################
##### AM_mcmc_output function
#################################################################################
#' plot AM_mcmc_output
#'
#'
#' Given an \code{\link{AM_mcmc_output}} object, this function plots some useful information about the MCMC results
#' regarding \eqn{M} and \eqn{K}. Besides the PMFs, some of the diagnostic plots of the MCMC chain are visualised.
#'
#'@param x an \code{\link{AM_mcmc_output}} object.
#'@param ... all additional parameters are ignored.
#'@return NULL. Called for side effects.
#'
#'@method plot AM_mcmc_output
#'@importFrom graphics image
#'@importFrom grDevices gray.colors
#'@export
# Walk through the diagnostic plots of an AM_mcmc_output object one at a
# time, pausing for user input between them, then show the chain
# correlation plot last. Always returns NULL.
plot.AM_mcmc_output <- function(x, ...) {
  paused_plots <- list(AM_plot_pairs, AM_plot_pmf, AM_plot_traces, AM_plot_values)
  for (make_plot in paused_plots) {
    print(make_plot(x))
    readline(prompt = "Press [enter] to continue")
  }
  print(AM_plot_chaincor(x))
  return(NULL)
}
#' Internal function that produces a string from a list of values
#'
#'@param x a list of values
#'
#'@importFrom utils head
#' @keywords internal
# Internal: render a named list as "name = value, name = value, ...".
# head() caps each entry at its first six elements; a multi-element entry
# therefore contributes several "name = value" fragments, as before.
# Returns "" for an empty (or unnamed) list.
list_values = function (x) {
  fragments <- lapply(names(x), function(key) sprintf("%s = %s", key, head(x[[key]])))
  # Building the full vector up-front replaces the old append()-in-a-loop
  # pattern, which reallocated the accumulator on every iteration.
  return (paste(unlist(fragments), collapse = ", "))
}
#' summary information of the AM_mcmc_output object
#'
#'
#' Given an \code{\link{AM_mcmc_output}} object, this function prints the summary information
#' pertaining to the given model output.
#'
#'@param object a \code{\link{AM_mcmc_output}} object
#'@param ... all additional parameters are ignored
#'@return NULL. Called for side effects.
#'
#'
#'@method summary AM_mcmc_output
#'@seealso \code{\link{AM_mcmc_fit}}, \code{\link{AM_mcmc_refit}}
#'@export
# Print a textual summary of a fitted AM_mcmc_output object: first the
# kernel / prior / MCMC settings recorded on the fit, then one row of
# posterior summary statistics (mean, sd, quantiles, ESS, MCMC error) per
# extracted scalar quantity.
summary.AM_mcmc_output = function(object, ...) {
  cat("\n", "Fitted model:", "\n")
  cat(" -mix_kernel_hyperparams(", list_values(attr(object, 'mix_kernel_hyperparams')), ")\n", sep = "")
  cat(" -mix_components_prior(", list_values(attr(object, 'mix_components_prior')), ")\n", sep = "")
  cat(" -mix_weight_prior(", list_values(attr(object, 'mix_weight_prior')), ")\n", sep = "")
  cat(" -mcmc_parameters(", list_values(attr(object, 'mcmc_parameters')), ")\n", sep = "")
  cat("\n - Summary of the MCMC output:\n\n")
  cat(sprintf(" %10s%10s%10s%10s%10s%10s%10s%10s\n", "Name", "Mean", "StdDev", "2.5%", "50%", "97.5%", "ESS", "MCMC Err."))
  # Traces too wide to summarise row-wise are skipped.
  # (Renamed from `invisible`, which shadowed base::invisible.)
  hidden = c("CI", "W", "mu", "Sig", "sig2", "theta", "R", "P")
  # If fixed clustering
  for (item in names(object)) {
    if (!item %in% hidden) {
      allcols = AM_extract(object, c(item))
      for (subitem in names(allcols)) {
        e = allcols[[subitem]]
        # ESS and MCMC error are only computed for complete (NA-free) chains.
        complete = !anyNA(e)
        neff = if (complete) IAM_mcmc_neff(e) else NA
        mcmcerror = if (complete) IAM_mcmc_error(e) else NA
        # `probs =` spelled out (was `prob =`, relying on partial matching).
        q = quantile(e, probs = c(0.025, 0.5, 0.975), names = FALSE, na.rm = TRUE)
        cat(sprintf(" %10s%10.2f%10.2f%10.2f%10.2f%10.2f%10.2f%10.2f\n", subitem, mean(e), sd(e), q[1], q[2], q[3], neff, mcmcerror))
      }
    }
  }
}
## INTERNAL
# AM_reshape reshapes the raw (flattened) traces of an MCMC fit back into
# matrix form: each row of fit$theta becomes an (n/y_dim) x y_dim matrix,
# and each entry of fit$mu / fit$sig2 / fit$Sig becomes a per-component
# list of matrices sized by the data dimension y_dim.
AM_reshape <- function(fit, y) {
  # y_dim is the number of columns of the data (1 for vector input).
  y_dim = dim(y)[2]
  if (is.null(y_dim)) {
    y_dim = 1
  }
  if (!is.null(fit$theta)) {
    theta_mat = as.matrix(fit$theta)
    theta = apply(theta_mat, 1, function(x) {
      x_vec = as.numeric(unlist(x))
      # column-major reshape; `byrow = FALSE` spelled out (was `F`, which is
      # reassignable and therefore unsafe)
      matrix(x_vec, ncol = y_dim, nrow = length(x_vec) / y_dim, byrow = FALSE)
    })
    fit$theta = theta
  }
  if (!is.null(fit$mu)) {
    # For each iteration, split the flattened values into M chunks and
    # rebuild one y_dim-row matrix per mixture component.
    mu = mapply(function(x, m) {
      mu_size = length(x) / m
      mu_individual = split(x, ceiling(seq_along(x) / mu_size))
      mu_combined = lapply(mu_individual, function(mu_vec) {
        matrix(unlist(mu_vec), nrow = y_dim, byrow = FALSE)
      })
    }, fit$mu, fit$M)
    fit$mu = mu
  }
  if (!is.null(fit$sig2)) {
    # Same chunk-and-rebuild scheme, into y_dim x y_dim matrices.
    sig = mapply(function(x, m) {
      sig_size = length(x) / m
      sig_individual = split(x, ceiling(seq_along(x) / sig_size))
      sig_combined = lapply(sig_individual, function(sig_vec) {
        matrix(unlist(sig_vec), ncol = y_dim, nrow = y_dim, byrow = FALSE)
      })
    }, fit$sig2, fit$M)
    fit$sig2 = sig
  }
  if (!is.null(fit$Sig)) {
    # Same chunk-and-rebuild scheme, into y_dim x y_dim matrices.
    sig = mapply(function(x, m) {
      sig_size = length(x) / m
      sig_individual = split(x, ceiling(seq_along(x) / sig_size))
      sig_combined = lapply(sig_individual, function(sig_vec) {
        matrix(unlist(sig_vec), ncol = y_dim, nrow = y_dim, byrow = FALSE)
      })
    }, fit$Sig, fit$M)
    fit$Sig = sig
  }
  return(fit)
}
#################################################################################
##### AM_mcmc_fit function
#################################################################################
#' Performs a Gibbs sampling
#'
#' The \code{AM_mcmc_fit} function performs a Gibbs sampling in order to estimate the mixture comprising the sample data \code{y}.
#' The mixture selected must be of a predefined type \code{mix_kernel_hyperparams} (defined with \code{AM_mix_hyperparams_*} functions, where star
#' \code{*} denotes the chosen kernel).
#' Additionally, a prior distribution on the number of mixture components
#' must be specified through \code{mix_components_prior}
#' (generated with \code{AM_mix_components_prior_*} functions, where \code{*} denotes the chosen prior). Similarly,
#' a prior on the weights of the mixture should be specified through \code{mix_weight_prior}
#' (defined with \code{AM_mix_weights_prior_*} functions). Finally, with \code{mcmc_parameters}, the user sets
#' the MCMC parameters for the Gibbs sampler (defined with \code{\link{AM_mcmc_parameters}} functions).
#'
#' If no initial clustering is specified (either as \code{init_K} or \code{init_clustering}),
#' then every observation is allocated to a different cluster.
#' If \code{init_K} is specified then AntMAN initialises the clustering through K-means.
#'
#' **Warning**: if the user does not specify init_K or initial_clustering, the first steps can be time-consuming because of the default setting of the initial clustering.
#'
#'
#'@param y input data, can be a vector or a matrix.
#'@param mix_kernel_hyperparams is a configuration list, defined by *_mix_hyperparams functions, where * denotes the chosen kernel.
#'See \code{\link{AM_mix_hyperparams_multiber}}, \code{\link{AM_mix_hyperparams_multinorm}}, \code{\link{AM_mix_hyperparams_uninorm}}, \code{\link{AM_mix_hyperparams_unipois}} for more details.
#'@param initial_clustering is a vector CI of initial cluster assignement. If no clustering is specified (either as \code{init_K} or \code{init_clustering}), then every observation is
#' assigned to its own cluster.
#'@param fixed_clustering if specified, this is the vector CI containing the cluster assignments. This will remain unchanged for every iteration.
#'@param init_K initial value for the number of clusters. When this is specified, AntMAN initialises the clustering assignment using K-means.
#'@param mix_components_prior is a configuration list defined by AM_mix_components_prior_* functions, where * denotes the chosen prior.
#' See \code{\link{AM_mix_components_prior_dirac}},
#' \cr \code{\link{AM_mix_components_prior_negbin}}, \code{\link{AM_mix_components_prior_pois}} for more \cr
#' details.
#'@param mix_weight_prior is a configuration list defined by AM_weight_prior_* functions, where * denotes the chosen prior specification.
#' See \code{\link{AM_mix_weights_prior_gamma}} for more \cr details.
#'@param mcmc_parameters is a configuration list defined by AM_mcmc_parameters. See \code{\link{AM_mcmc_parameters}} for more details.
#'@return The return value is an \code{\link{AM_mcmc_output}} object.
#'@examples
#' \donttest{
#' AM_mcmc_fit( AM_sample_unipois()$y,
#' AM_mix_hyperparams_unipois (alpha0=2, beta0=0.2),
#' mcmc_parameters = AM_mcmc_parameters(niter=50, burnin=0, thin=1, verbose=0))
#' }
#'@useDynLib AntMAN
#'@export
AM_mcmc_fit <- function(
  y,
  mix_kernel_hyperparams,
  initial_clustering = NULL,
  init_K = NULL,
  fixed_clustering = NULL,
  mix_components_prior = AM_mix_components_prior_pois() ,
  mix_weight_prior = AM_mix_weights_prior_gamma(),
  mcmc_parameters = AM_mcmc_parameters() ) {
  # At most one of initial_clustering / init_K / fixed_clustering may be
  # supplied; when none is given, every observation starts in its own
  # cluster. Conditions use `&&` (scalar, short-circuiting) instead of the
  # elementwise `&` of the original -- all operands here are scalar logicals.
  fixed_cluster = FALSE
  if (is.null(fixed_clustering) && is.null(init_K) && !is.null(initial_clustering)) {
    # caller supplied the starting partition directly
    fixed_cluster = FALSE
  } else if (!is.null(init_K) && is.null(initial_clustering) && is.null(fixed_clustering)) {
    # initialise the partition with K-means on the data
    fixed_cluster = FALSE
    initial_clustering <- kmeans(y, init_K)$cluster
  } else if (is.null(init_K) && is.null(initial_clustering) && is.null(fixed_clustering)) {
    # default: all-singletons partition (can make the first MCMC steps slow)
    fixed_cluster = FALSE
    initial_clustering <- 0:(NROW(y)-1)
  } else if (is.null(init_K) && is.null(initial_clustering) && !is.null(fixed_clustering)) {
    # the clustering is held fixed for the whole run
    fixed_cluster = TRUE
    initial_clustering = fixed_clustering
  } else {
    # Fix: the message used to say "K_init", which is not the argument name.
    stop("Please provide only one of init_K or initial_clustering or fixed_clustering.")
  }
  # Run the internal sampler and tag the result with everything needed to
  # summarise or refit it later (see AM_mcmc_refit / summary.AM_mcmc_output).
  fit_result = (structure(
    IAM_mcmc_fit(y = y, mix_kernel_hyperparams = mix_kernel_hyperparams, initial_clustering = initial_clustering, fixed_clustering = fixed_cluster, mix_components_prior = mix_components_prior, mix_weight_prior = mix_weight_prior, mcmc_parameters = mcmc_parameters)
    , class = "AM_mcmc_output",
    mix_kernel_hyperparams = mix_kernel_hyperparams,
    initial_clustering = initial_clustering,
    init_K = init_K,
    fixed_clustering = fixed_clustering,
    mix_components_prior = mix_components_prior,
    mix_weight_prior = mix_weight_prior,
    mcmc_parameters = mcmc_parameters));
  # convert the flattened parameter traces into per-iteration matrices
  fit_result = AM_reshape(fit_result, y)
  return (fit_result)
}
#' Performs a Gibbs sampling reusing previous configuration
#'
#' Similar to \code{\link{AM_mcmc_fit}}, the \code{AM_mcmc_refit} function performs a Gibbs sampling in order to estimate
#' a mixture. However parameters will be reused from a previous result from \code{\link{AM_mcmc_fit}}.
#'
#' In practice this function will call AM_mcmc_fit(y, fixed_clustering = fixed_clustering, ...); with the same parameters as previously
#' specified.
#'
#'@param y input data, can be a vector or a matrix.
#'@param fit previous output from \code{\link{AM_mcmc_fit}} that is used to setup kernel and priors.
#'@param fixed_clustering is a vector CI of cluster assignment that will remain unchanged for every iterations.
#'@param mcmc_parameters is a configuration list defined by \code{\link{AM_mcmc_parameters}}.
#'@return The return value is an \code{\link{AM_mcmc_output}} object.
#'@examples
#' \donttest{
#' y = AM_sample_unipois()$y
#' fit = AM_mcmc_fit( y ,
#' AM_mix_hyperparams_unipois (alpha0=2, beta0=0.2),
#' mcmc_parameters = AM_mcmc_parameters(niter=20, burnin=0, thin=1, verbose=0))
#' eam = AM_coclustering(fit)
#' cluster = AM_salso(eam, "binder")
#' refit = AM_mcmc_refit(y , fit, cluster,
#' mcmc_parameters = AM_mcmc_parameters(niter=20, burnin=0, thin=1, verbose=0));
#' }
#'@export
# Re-run the sampler with the kernel and priors recorded as attributes on a
# previous fit, holding the supplied clustering fixed for every iteration.
AM_mcmc_refit <- function(
  y, fit,
  fixed_clustering,
  mcmc_parameters = AM_mcmc_parameters() ) {
  ## TODO : check input data size
  AM_mcmc_fit(
    y,
    mix_kernel_hyperparams = attr(fit, "mix_kernel_hyperparams"),
    fixed_clustering = fixed_clustering,
    mix_components_prior = attr(fit, "mix_components_prior"),
    mix_weight_prior = attr(fit, "mix_weight_prior"),
    mcmc_parameters = mcmc_parameters
  )
}
#################################################################################
##### AM_mcmc_parameters function
#################################################################################
#' MCMC Parameters
#'
#' This function generates an MCMC parameters list to be used as \code{mcmc_parameters} argument within \code{\link{AM_mcmc_fit}}.
#'
#'
#'
#'@param niter Total number of MCMC iterations to be carried out.
#'@param burnin Number of iterations to be considered as burn-in. Samples from this burn-in period are discarded.
#'@param thin Thinning rate. This argument specifies how often a draw from the posterior distribution is stored after
#' burnin, i.e. only one every \code{thin} samples is saved. Therefore, the total number of MCMC samples saved is
#' (\code{niter} - \code{burnin})/\code{thin}. If \code{thin} = 1, then AntMAN stores every iteration.
#'@param verbose A value from 0 to 4, that specifies the desired level of verbosity (0:None, 1:Warnings, 2:Debug, 3:Extras).
#'@param output A list of parameters output to return.
#'@param output_dir Path to an output dir, where to store all the outputs.
#'@param parallel Some of the algorithms can be run in parallel using OpenMP. When set to True, this parameter triggers the parallelism.
#'@return An \code{\link{AM_mcmc_configuration}} Object. This is a list to be used as \code{mcmc_parameters} argument with \code{\link{AM_mcmc_fit}}.
#'@examples
#' AM_mcmc_parameters (niter=1000, burnin=10000, thin=50)
#' AM_mcmc_parameters (niter=1000, burnin=10000, thin=50, output=c("CI","W","TAU"))
#'@export
# Assemble the Gibbs-sampler settings into a tagged configuration list of
# class "AM_mcmc_configuration", suitable as the `mcmc_parameters` argument
# of AM_mcmc_fit().
AM_mcmc_parameters <- function(niter = 5000,
                               burnin = 2500,  # niter / 2
                               thin = 1,
                               verbose = 1,
                               output = c("CI", "K"),
                               parallel = TRUE,
                               output_dir = NULL) {
  config <- list(
    type = "AM_MCMC_PARAMETERS",
    niter = niter, burnin = burnin, thin = thin,
    verbose = verbose, output = output, parallel = parallel,
    output_dir = output_dir
  )
  structure(config, class = "AM_mcmc_configuration")
}
| /AntMAN/R/AM_mcmc.R | no_license | bbodin/AntMAN | R | false | false | 15,242 | r | #######################################################################################
###############
############### AntMAN Package
###############
###############
#######################################################################################
#' S3 class AM_mcmc_output
#' @description Output type of return values from \code{\link{AM_mcmc_fit}}.
#' @seealso \code{\link{AM_mcmc_fit}}
#' @name AM_mcmc_output
#' @return \code{\link{AM_mcmc_output}}
NULL
#' S3 class AM_mcmc_configuration
#' @description Output type of return values from \code{\link{AM_mcmc_parameters}}.
#' @seealso \code{\link{AM_mcmc_fit}}
#' @name AM_mcmc_configuration
#' @return \code{\link{AM_mcmc_configuration}}
NULL
#################################################################################
##### AM_mcmc_configuration function
#################################################################################
#' summary information of the AM_mcmc_configuration object
#'
#'
#'
#' Given an \code{\link{AM_mcmc_configuration}} object, this function prints the summary information
#' of the specified mcmc configuration.
#'
#'@param object an \code{\link{AM_mcmc_configuration}} object.
#'@param ... all additional parameters are ignored
#'
#'
#'@method summary AM_mcmc_configuration
#'@seealso \code{\link{AM_mcmc_parameters}}
#'@return NULL. Called for side effects.
#'@export
# Print a short listing of an AM_mcmc_configuration object: one line per
# configuration field, showing (up to) the first few values of each entry.
summary.AM_mcmc_configuration <- function(object, ...) {
  cat("\n", "AM_mcmc_configuration\n", sep = "")
  for (field in names(object)) {
    cat(" -", field, ": ", head(unlist(object[[field]], use.names = FALSE)), "\n")
  }
}
#################################################################################
##### AM_mcmc_output function
#################################################################################
#' plot AM_mcmc_output
#'
#'
#' Given an \code{\link{AM_mcmc_output}} object, this function plots some useful information about the MCMC results
#' regarding \eqn{M} and \eqn{K}. Besides the PMFs, some of the diagnostic plots of the MCMC chain are visualised.
#'
#'@param x an \code{\link{AM_mcmc_output}} object.
#'@param ... all additional parameters are ignored.
#'@return NULL. Called for side effects.
#'
#'@method plot AM_mcmc_output
#'@importFrom graphics image
#'@importFrom grDevices gray.colors
#'@export
# Walk through the diagnostic plots of an AM_mcmc_output object one at a
# time, pausing for user input between them, then show the chain
# correlation plot last. Always returns NULL.
plot.AM_mcmc_output <- function(x, ...) {
  paused_plots <- list(AM_plot_pairs, AM_plot_pmf, AM_plot_traces, AM_plot_values)
  for (make_plot in paused_plots) {
    print(make_plot(x))
    readline(prompt = "Press [enter] to continue")
  }
  print(AM_plot_chaincor(x))
  return(NULL)
}
#' Internal function that produces a string from a list of values
#'
#'@param x a list of values
#'
#'@importFrom utils head
#' @keywords internal
# Internal: render a named list as "name = value, name = value, ...".
# head() caps each entry at its first six elements; a multi-element entry
# therefore contributes several "name = value" fragments, as before.
# Returns "" for an empty (or unnamed) list.
list_values = function (x) {
  fragments <- lapply(names(x), function(key) sprintf("%s = %s", key, head(x[[key]])))
  # Building the full vector up-front replaces the old append()-in-a-loop
  # pattern, which reallocated the accumulator on every iteration.
  return (paste(unlist(fragments), collapse = ", "))
}
#' summary information of the AM_mcmc_output object
#'
#'
#' Given an \code{\link{AM_mcmc_output}} object, this function prints the summary information
#' pertaining to the given model output.
#'
#'@param object a \code{\link{AM_mcmc_output}} object
#'@param ... all additional parameters are ignored
#'@return NULL. Called for side effects.
#'
#'
#'@method summary AM_mcmc_output
#'@seealso \code{\link{AM_mcmc_fit}}, \code{\link{AM_mcmc_refit}}
#'@export
# Print a textual summary of a fitted AM_mcmc_output object: first the
# kernel / prior / MCMC settings recorded on the fit, then one row of
# posterior summary statistics (mean, sd, quantiles, ESS, MCMC error) per
# extracted scalar quantity.
summary.AM_mcmc_output = function(object, ...) {
  cat("\n", "Fitted model:", "\n")
  cat(" -mix_kernel_hyperparams(", list_values(attr(object, 'mix_kernel_hyperparams')), ")\n", sep = "")
  cat(" -mix_components_prior(", list_values(attr(object, 'mix_components_prior')), ")\n", sep = "")
  cat(" -mix_weight_prior(", list_values(attr(object, 'mix_weight_prior')), ")\n", sep = "")
  cat(" -mcmc_parameters(", list_values(attr(object, 'mcmc_parameters')), ")\n", sep = "")
  cat("\n - Summary of the MCMC output:\n\n")
  cat(sprintf(" %10s%10s%10s%10s%10s%10s%10s%10s\n", "Name", "Mean", "StdDev", "2.5%", "50%", "97.5%", "ESS", "MCMC Err."))
  # Traces too wide to summarise row-wise are skipped.
  # (Renamed from `invisible`, which shadowed base::invisible.)
  hidden = c("CI", "W", "mu", "Sig", "sig2", "theta", "R", "P")
  # If fixed clustering
  for (item in names(object)) {
    if (!item %in% hidden) {
      allcols = AM_extract(object, c(item))
      for (subitem in names(allcols)) {
        e = allcols[[subitem]]
        # ESS and MCMC error are only computed for complete (NA-free) chains.
        complete = !anyNA(e)
        neff = if (complete) IAM_mcmc_neff(e) else NA
        mcmcerror = if (complete) IAM_mcmc_error(e) else NA
        # `probs =` spelled out (was `prob =`, relying on partial matching).
        q = quantile(e, probs = c(0.025, 0.5, 0.975), names = FALSE, na.rm = TRUE)
        cat(sprintf(" %10s%10.2f%10.2f%10.2f%10.2f%10.2f%10.2f%10.2f\n", subitem, mean(e), sd(e), q[1], q[2], q[3], neff, mcmcerror))
      }
    }
  }
}
## INTERNAL
# AM_reshape reshapes the raw (flattened) traces of an MCMC fit back into
# matrix form: each row of fit$theta becomes an (n/y_dim) x y_dim matrix,
# and each entry of fit$mu / fit$sig2 / fit$Sig becomes a per-component
# list of matrices sized by the data dimension y_dim.
AM_reshape <- function(fit, y) {
  # y_dim is the number of columns of the data (1 for vector input).
  y_dim = dim(y)[2]
  if (is.null(y_dim)) {
    y_dim = 1
  }
  if (!is.null(fit$theta)) {
    theta_mat = as.matrix(fit$theta)
    theta = apply(theta_mat, 1, function(x) {
      x_vec = as.numeric(unlist(x))
      # column-major reshape; `byrow = FALSE` spelled out (was `F`, which is
      # reassignable and therefore unsafe)
      matrix(x_vec, ncol = y_dim, nrow = length(x_vec) / y_dim, byrow = FALSE)
    })
    fit$theta = theta
  }
  if (!is.null(fit$mu)) {
    # For each iteration, split the flattened values into M chunks and
    # rebuild one y_dim-row matrix per mixture component.
    mu = mapply(function(x, m) {
      mu_size = length(x) / m
      mu_individual = split(x, ceiling(seq_along(x) / mu_size))
      mu_combined = lapply(mu_individual, function(mu_vec) {
        matrix(unlist(mu_vec), nrow = y_dim, byrow = FALSE)
      })
    }, fit$mu, fit$M)
    fit$mu = mu
  }
  if (!is.null(fit$sig2)) {
    # Same chunk-and-rebuild scheme, into y_dim x y_dim matrices.
    sig = mapply(function(x, m) {
      sig_size = length(x) / m
      sig_individual = split(x, ceiling(seq_along(x) / sig_size))
      sig_combined = lapply(sig_individual, function(sig_vec) {
        matrix(unlist(sig_vec), ncol = y_dim, nrow = y_dim, byrow = FALSE)
      })
    }, fit$sig2, fit$M)
    fit$sig2 = sig
  }
  if (!is.null(fit$Sig)) {
    # Same chunk-and-rebuild scheme, into y_dim x y_dim matrices.
    sig = mapply(function(x, m) {
      sig_size = length(x) / m
      sig_individual = split(x, ceiling(seq_along(x) / sig_size))
      sig_combined = lapply(sig_individual, function(sig_vec) {
        matrix(unlist(sig_vec), ncol = y_dim, nrow = y_dim, byrow = FALSE)
      })
    }, fit$Sig, fit$M)
    fit$Sig = sig
  }
  return(fit)
}
#################################################################################
##### AM_mcmc_fit function
#################################################################################
#' Performs a Gibbs sampling
#'
#' The \code{AM_mcmc_fit} function performs a Gibbs sampling in order to estimate the mixture comprising the sample data \code{y}.
#' The mixture selected must be of a predefined type \code{mix_kernel_hyperparams} (defined with \code{AM_mix_hyperparams_*} functions, where star
#' \code{*} denotes the chosen kernel).
#' Additionally, a prior distribution on the number of mixture components
#' must be specified through \code{mix_components_prior}
#' (generated with \code{AM_mix_components_prior_*} functions, where \code{*} denotes the chosen prior). Similarly,
#' a prior on the weights of the mixture should be specified through \code{mix_weight_prior}
#' (defined with \code{AM_mix_weights_prior_*} functions). Finally, with \code{mcmc_parameters}, the user sets
#' the MCMC parameters for the Gibbs sampler (defined with \code{\link{AM_mcmc_parameters}} functions).
#'
#' If no initial clustering is specified (either as \code{init_K} or \code{init_clustering}),
#' then every observation is allocated to a different cluster.
#' If \code{init_K} is specified then AntMAN initialises the clustering through K-means.
#'
#' **Warning**: if the user does not specify init_K or initial_clustering, the first steps can be time-consuming because of the default setting of the initial clustering.
#'
#'
#'@param y input data, can be a vector or a matrix.
#'@param mix_kernel_hyperparams is a configuration list, defined by *_mix_hyperparams functions, where * denotes the chosen kernel.
#'See \code{\link{AM_mix_hyperparams_multiber}}, \code{\link{AM_mix_hyperparams_multinorm}}, \code{\link{AM_mix_hyperparams_uninorm}}, \code{\link{AM_mix_hyperparams_unipois}} for more details.
#'@param initial_clustering is a vector CI of initial cluster assignement. If no clustering is specified (either as \code{init_K} or \code{init_clustering}), then every observation is
#' assigned to its own cluster.
#'@param fixed_clustering if specified, this is the vector CI containing the cluster assignments. This will remain unchanged for every iteration.
#'@param init_K initial value for the number of clusters. When this is specified, AntMAN initialises the clustering assignment using K-means.
#'@param mix_components_prior is a configuration list defined by AM_mix_components_prior_* functions, where * denotes the chosen prior.
#' See \code{\link{AM_mix_components_prior_dirac}},
#' \cr \code{\link{AM_mix_components_prior_negbin}}, \code{\link{AM_mix_components_prior_pois}} for more \cr
#' details.
#'@param mix_weight_prior is a configuration list defined by AM_weight_prior_* functions, where * denotes the chosen prior specification.
#' See \code{\link{AM_mix_weights_prior_gamma}} for more \cr details.
#'@param mcmc_parameters is a configuration list defined by AM_mcmc_parameters. See \code{\link{AM_mcmc_parameters}} for more details.
#'@return The return value is an \code{\link{AM_mcmc_output}} object.
#'@examples
#' \donttest{
#' AM_mcmc_fit( AM_sample_unipois()$y,
#' AM_mix_hyperparams_unipois (alpha0=2, beta0=0.2),
#' mcmc_parameters = AM_mcmc_parameters(niter=50, burnin=0, thin=1, verbose=0))
#' }
#'@useDynLib AntMAN
#'@export
AM_mcmc_fit <- function(
  y,
  mix_kernel_hyperparams,
  initial_clustering = NULL,
  init_K = NULL,
  fixed_clustering = NULL,
  mix_components_prior = AM_mix_components_prior_pois() ,
  mix_weight_prior = AM_mix_weights_prior_gamma(),
  mcmc_parameters = AM_mcmc_parameters() ) {
  # At most one of initial_clustering / init_K / fixed_clustering may be
  # supplied; when none is given, every observation starts in its own
  # cluster. Conditions use `&&` (scalar, short-circuiting) instead of the
  # elementwise `&` of the original -- all operands here are scalar logicals.
  fixed_cluster = FALSE
  if (is.null(fixed_clustering) && is.null(init_K) && !is.null(initial_clustering)) {
    # caller supplied the starting partition directly
    fixed_cluster = FALSE
  } else if (!is.null(init_K) && is.null(initial_clustering) && is.null(fixed_clustering)) {
    # initialise the partition with K-means on the data
    fixed_cluster = FALSE
    initial_clustering <- kmeans(y, init_K)$cluster
  } else if (is.null(init_K) && is.null(initial_clustering) && is.null(fixed_clustering)) {
    # default: all-singletons partition (can make the first MCMC steps slow)
    fixed_cluster = FALSE
    initial_clustering <- 0:(NROW(y)-1)
  } else if (is.null(init_K) && is.null(initial_clustering) && !is.null(fixed_clustering)) {
    # the clustering is held fixed for the whole run
    fixed_cluster = TRUE
    initial_clustering = fixed_clustering
  } else {
    # Fix: the message used to say "K_init", which is not the argument name.
    stop("Please provide only one of init_K or initial_clustering or fixed_clustering.")
  }
  # Run the internal sampler and tag the result with everything needed to
  # summarise or refit it later (see AM_mcmc_refit / summary.AM_mcmc_output).
  fit_result = (structure(
    IAM_mcmc_fit(y = y, mix_kernel_hyperparams = mix_kernel_hyperparams, initial_clustering = initial_clustering, fixed_clustering = fixed_cluster, mix_components_prior = mix_components_prior, mix_weight_prior = mix_weight_prior, mcmc_parameters = mcmc_parameters)
    , class = "AM_mcmc_output",
    mix_kernel_hyperparams = mix_kernel_hyperparams,
    initial_clustering = initial_clustering,
    init_K = init_K,
    fixed_clustering = fixed_clustering,
    mix_components_prior = mix_components_prior,
    mix_weight_prior = mix_weight_prior,
    mcmc_parameters = mcmc_parameters));
  # convert the flattened parameter traces into per-iteration matrices
  fit_result = AM_reshape(fit_result, y)
  return (fit_result)
}
#' Performs a Gibbs sampling reusing previous configuration
#'
#' Similar to \code{\link{AM_mcmc_fit}}, the \code{AM_mcmc_refit} function performs a Gibbs sampling in order to estimate
#' a mixture. However parameters will be reused from a previous result from \code{\link{AM_mcmc_fit}}.
#'
#' In practice this function will call AM_mcmc_fit(y, fixed_clustering = fixed_clustering, ...); with the same parameters as previously
#' specified.
#'
#'@param y input data, can be a vector or a matrix.
#'@param fit previous output from \code{\link{AM_mcmc_fit}} that is used to setup kernel and priors.
#'@param fixed_clustering is a vector CI of cluster assignment that will remain unchanged for every iterations.
#'@param mcmc_parameters is a configuration list defined by \code{\link{AM_mcmc_parameters}}.
#'@return The return value is an \code{\link{AM_mcmc_output}} object.
#'@examples
#' \donttest{
#' y = AM_sample_unipois()$y
#' fit = AM_mcmc_fit( y ,
#' AM_mix_hyperparams_unipois (alpha0=2, beta0=0.2),
#' mcmc_parameters = AM_mcmc_parameters(niter=20, burnin=0, thin=1, verbose=0))
#' eam = AM_coclustering(fit)
#' cluster = AM_salso(eam, "binder")
#' refit = AM_mcmc_refit(y , fit, cluster,
#' mcmc_parameters = AM_mcmc_parameters(niter=20, burnin=0, thin=1, verbose=0));
#' }
#'@export
# Re-run the sampler with the kernel and priors recorded as attributes on a
# previous fit, holding the supplied clustering fixed for every iteration.
AM_mcmc_refit <- function(
  y, fit,
  fixed_clustering,
  mcmc_parameters = AM_mcmc_parameters() ) {
  ## TODO : check input data size
  AM_mcmc_fit(
    y,
    mix_kernel_hyperparams = attr(fit, "mix_kernel_hyperparams"),
    fixed_clustering = fixed_clustering,
    mix_components_prior = attr(fit, "mix_components_prior"),
    mix_weight_prior = attr(fit, "mix_weight_prior"),
    mcmc_parameters = mcmc_parameters
  )
}
#################################################################################
##### AM_mcmc_parameters function
#################################################################################
#' MCMC Parameters
#'
#' This function generates an MCMC parameters list to be used as \code{mcmc_parameters} argument within \code{\link{AM_mcmc_fit}}.
#'
#'
#'
#'@param niter Total number of MCMC iterations to be carried out.
#'@param burnin Number of iterations to be considered as burn-in. Samples from this burn-in period are discarded.
#'@param thin Thinning rate. This argument specifies how often a draw from the posterior distribution is stored after
#' burnin, i.e. only one every \code{thin} samples is saved. Therefore, the total number of MCMC samples saved is
#' (\code{niter} - \code{burnin})/\code{thin}. If \code{thin} = 1, then AntMAN stores every iteration.
#'@param verbose A value from 0 to 4, that specifies the desired level of verbosity (0:None, 1:Warnings, 2:Debug, 3:Extras).
#'@param output A list of parameters output to return.
#'@param output_dir Path to an output dir, where to store all the outputs.
#'@param parallel Some of the algorithms can be run in parallel using OpenMP. When set to True, this parameter triggers the parallelism.
#'@return An \code{\link{AM_mcmc_configuration}} Object. This is a list to be used as \code{mcmc_parameters} argument with \code{\link{AM_mcmc_fit}}.
#'@examples
#' AM_mcmc_parameters (niter=1000, burnin=10000, thin=50)
#' AM_mcmc_parameters (niter=1000, burnin=10000, thin=50, output=c("CI","W","TAU"))
#'@export
AM_mcmc_parameters <- function(niter = 5000,
                               burnin = 2500, ## default is niter / 2
                               thin = 1,
                               verbose = 1,
                               output = c("CI", "K"),
                               parallel = TRUE,
                               output_dir = NULL) {
  # Bundle the MCMC settings into a classed list; AM_mcmc_fit()
  # recognises it through the "AM_mcmc_configuration" class.
  params <- list(
    type = "AM_MCMC_PARAMETERS",
    niter = niter,
    burnin = burnin,
    thin = thin,
    verbose = verbose,
    output = output,
    parallel = parallel,
    output_dir = output_dir
  )
  structure(params, class = "AM_mcmc_configuration")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/word_position.R
\name{word_position}
\alias{word_position}
\title{Word Position}
\usage{
word_position(
text.var,
match.terms,
digits = 2,
percent = TRUE,
zero.replace = 0,
...
)
}
\arguments{
\item{text.var}{The text variable.}
\item{match.terms}{A character vector of quoted terms to find the positions of.}
\item{digits}{Integer; number of decimal places to round when printing.}
\item{percent}{logical. If \code{TRUE} output given as percent. If
\code{FALSE} the output is proportion.}
\item{zero.replace}{Value to replace 0 values with.}
\item{\ldots}{Currently ignored.}
}
\value{
Returns a list, of class "word_position", of data frames and
information regarding word positions:
\item{raw}{raw word position counts in long format (may be more useful for plotting)}
\item{count}{integer word position counts}
\item{prop}{proportional word position counts; proportional to
each total word uses}
\item{rnp}{a character combination data frame of count and proportional}
\item{zero_replace}{value to replace zeros with; mostly internal use}
\item{percent}{The value of percent used for plotting purposes.}
\item{digits}{integer value of number of digits to display; mostly internal
use}
}
\description{
Find counts of the positioning of words within a sentence.
}
\note{
Default printing is a heatmap plot.
}
\examples{
\dontrun{
position <- with(DATA, word_position(sent_detect(state), Top25Words))
position
lview(position)
plot(position)
scores(position)
preprocessed(position)
counts(position)
proportions(position)
plot(proportions(position))
stopwords <- unique(c(contractions[[1]], Top200Words))
topwords <- freq_terms(pres_debates2012[["dialogue"]], top = 40,
at.least = 4, stopwords = stopwords)[[1]]
word_position(pres_debates2012[["dialogue"]], topwords)
plot(word_position(pres_debates2012[["dialogue"]], topwords), FALSE)
plot(word_position(pres_debates2012[["dialogue"]], topwords), TRUE, scale=FALSE)
wordlist <- c("tax", "health", "rich", "america", "truth", "money", "cost",
"governnor", "president", "we", "job", "i", "you", "because",
"our", "years")
word_position(pres_debates2012[["dialogue"]], wordlist)
## BY VARIABLES
library(gridExtra)
pres_deb_by_time <- with(pres_debates2012, split(dialogue, time))
out1 <-lapply(pres_deb_by_time, word_position, wordlist)
do.call("grid.arrange", c(lapply(out1, plot), ncol=1))
pres_deb_by_person <- with(pres_debates2012, split(dialogue, person))
out2 <-lapply(pres_deb_by_person, word_position, wordlist)
plots <- lapply(names(out2), function(x) plot(out2[[x]], scale=FALSE) +
ggtitle(x))
do.call("grid.arrange", c(plots, ncol=2))
## As a histogram
## theme taken from: http://jonlefcheck.net/2013/03/11/black-theme-for-ggplot2-2/
theme_black <- function(base_size=12,base_family="") {
theme_grey(base_size=base_size,base_family=base_family) \%+replace\%
theme(
# Specify axis options
axis.line=element_blank(),
axis.text.x=element_text(size=base_size*0.8,color="grey55",
lineheight=0.9,vjust=1),
axis.text.y=element_text(size=base_size*0.8,color="grey55",
lineheight=0.9,hjust=1),
axis.ticks=element_line(color="grey55",size = 0.2),
axis.title.x=element_text(size=base_size,color="grey55",vjust=1),
axis.title.y=element_text(size=base_size,color="grey55",angle=90,
vjust=0.5),
axis.ticks.length=unit(0.3,"lines"),
axis.ticks.margin=unit(0.5,"lines"),
# Specify legend options
legend.background=element_rect(color=NA,fill="black"),
legend.key=element_rect(color="grey55", fill="black"),
legend.key.size=unit(1.2,"lines"),
legend.key.height=NULL,
legend.key.width=NULL,
legend.text=element_text(size=base_size*0.8,color="grey55"),
legend.title=element_text(size=base_size*0.8,face="bold",hjust=0,
color="grey55"),
legend.position="right",
legend.text.align=NULL,
legend.title.align=NULL,
legend.direction="vertical",
legend.box=NULL,
# Specify panel options
panel.background=element_rect(fill="black",color = NA),
panel.border=element_rect(fill=NA,color="grey55"),
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
panel.spacing=unit(0.25,"lines"),
# Specify facetting options
strip.background=element_rect(fill="grey30",color="grey10"),
strip.text.x=element_text(size=base_size*0.8,color="grey55"),
strip.text.y=element_text(size=base_size*0.8,color="grey55",
angle=-90),
# Specify plot options
plot.background=element_rect(color="black",fill="black"),
plot.title=element_text(size=base_size*1.2,color="grey55"),
plot.margin=unit(c(1,1,0.5,0.5),"lines")
)
}
out3 <- list_df2df(lapply(out2[1:2], preprocessed), "Person")
out3 \%>\% ggplot(aes(x=position)) +
geom_histogram(binwidth = 1, fill="white") +
facet_grid(Person~word) +
theme_black() + ylab("Count") + xlab("Position")
## MOVE TO THE MICRO THROUGH QUALITATIVE ANALYSIS
locs <- unlist(setNames(lapply(wordlist, function(x){
sapply(c("ROMNEY", "OBAMA"), function(y){
which(pres_debates2012[["person"]] ==y & grepl(x, pres_debates2012[["dialogue"]]))
})
}), wordlist), recursive=FALSE)
fdl <- qdap:::folder(pres_context)
Map(function(x, y){
if (identical(integer(0), x)) return(NULL)
z <- with(pres_debates2012, trans_context(dialogue, person, inds=x, n.before=1))
z[["text"]] <- gsub(beg2char(y, "."),
paste0("[[", beg2char(y, "."), "]]"), z[["text"]])
print(z, file=file.path(fdl, sprintf("\%s.doc", y)))
}, locs, names(locs))
}
}
\keyword{position}
\keyword{word}
| /man/word_position.Rd | no_license | cran/qdap | R | false | true | 5,919 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/word_position.R
\name{word_position}
\alias{word_position}
\title{Word Position}
\usage{
word_position(
text.var,
match.terms,
digits = 2,
percent = TRUE,
zero.replace = 0,
...
)
}
\arguments{
\item{text.var}{The text variable.}
\item{match.terms}{A character vector of quoted terms to find the positions of.}
\item{digits}{Integer; number of decimal places to round when printing.}
\item{percent}{logical. If \code{TRUE} output given as percent. If
\code{FALSE} the output is proportion.}
\item{zero.replace}{Value to replace 0 values with.}
\item{\ldots}{Currently ignored.}
}
\value{
Returns a list, of class "word_position", of data frames and
information regarding word positions:
\item{raw}{raw word position counts in long format (may be more useful for plotting)}
\item{count}{integer word position counts}
\item{prop}{proportional word position counts; proportional to
each total word uses}
\item{rnp}{a character combination data frame of count and proportional}
\item{zero_replace}{value to replace zeros with; mostly internal use}
\item{percent}{The value of percent used for plotting purposes.}
\item{digits}{integer value of number of digits to display; mostly internal
use}
}
\description{
Find counts of the positioning of words within a sentence.
}
\note{
Default printing is a heatmap plot.
}
\examples{
\dontrun{
position <- with(DATA, word_position(sent_detect(state), Top25Words))
position
lview(position)
plot(position)
scores(position)
preprocessed(position)
counts(position)
proportions(position)
plot(proportions(position))
stopwords <- unique(c(contractions[[1]], Top200Words))
topwords <- freq_terms(pres_debates2012[["dialogue"]], top = 40,
at.least = 4, stopwords = stopwords)[[1]]
word_position(pres_debates2012[["dialogue"]], topwords)
plot(word_position(pres_debates2012[["dialogue"]], topwords), FALSE)
plot(word_position(pres_debates2012[["dialogue"]], topwords), TRUE, scale=FALSE)
wordlist <- c("tax", "health", "rich", "america", "truth", "money", "cost",
"governnor", "president", "we", "job", "i", "you", "because",
"our", "years")
word_position(pres_debates2012[["dialogue"]], wordlist)
## BY VARIABLES
library(gridExtra)
pres_deb_by_time <- with(pres_debates2012, split(dialogue, time))
out1 <-lapply(pres_deb_by_time, word_position, wordlist)
do.call("grid.arrange", c(lapply(out1, plot), ncol=1))
pres_deb_by_person <- with(pres_debates2012, split(dialogue, person))
out2 <-lapply(pres_deb_by_person, word_position, wordlist)
plots <- lapply(names(out2), function(x) plot(out2[[x]], scale=FALSE) +
ggtitle(x))
do.call("grid.arrange", c(plots, ncol=2))
## As a histogram
## theme taken from: http://jonlefcheck.net/2013/03/11/black-theme-for-ggplot2-2/
theme_black <- function(base_size=12,base_family="") {
theme_grey(base_size=base_size,base_family=base_family) \%+replace\%
theme(
# Specify axis options
axis.line=element_blank(),
axis.text.x=element_text(size=base_size*0.8,color="grey55",
lineheight=0.9,vjust=1),
axis.text.y=element_text(size=base_size*0.8,color="grey55",
lineheight=0.9,hjust=1),
axis.ticks=element_line(color="grey55",size = 0.2),
axis.title.x=element_text(size=base_size,color="grey55",vjust=1),
axis.title.y=element_text(size=base_size,color="grey55",angle=90,
vjust=0.5),
axis.ticks.length=unit(0.3,"lines"),
axis.ticks.margin=unit(0.5,"lines"),
# Specify legend options
legend.background=element_rect(color=NA,fill="black"),
legend.key=element_rect(color="grey55", fill="black"),
legend.key.size=unit(1.2,"lines"),
legend.key.height=NULL,
legend.key.width=NULL,
legend.text=element_text(size=base_size*0.8,color="grey55"),
legend.title=element_text(size=base_size*0.8,face="bold",hjust=0,
color="grey55"),
legend.position="right",
legend.text.align=NULL,
legend.title.align=NULL,
legend.direction="vertical",
legend.box=NULL,
# Specify panel options
panel.background=element_rect(fill="black",color = NA),
panel.border=element_rect(fill=NA,color="grey55"),
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
panel.spacing=unit(0.25,"lines"),
# Specify facetting options
strip.background=element_rect(fill="grey30",color="grey10"),
strip.text.x=element_text(size=base_size*0.8,color="grey55"),
strip.text.y=element_text(size=base_size*0.8,color="grey55",
angle=-90),
# Specify plot options
plot.background=element_rect(color="black",fill="black"),
plot.title=element_text(size=base_size*1.2,color="grey55"),
plot.margin=unit(c(1,1,0.5,0.5),"lines")
)
}
out3 <- list_df2df(lapply(out2[1:2], preprocessed), "Person")
out3 \%>\% ggplot(aes(x=position)) +
geom_histogram(binwidth = 1, fill="white") +
facet_grid(Person~word) +
theme_black() + ylab("Count") + xlab("Position")
## MOVE TO THE MICRO THROUGH QUALITATIVE ANALYSIS
locs <- unlist(setNames(lapply(wordlist, function(x){
sapply(c("ROMNEY", "OBAMA"), function(y){
which(pres_debates2012[["person"]] ==y & grepl(x, pres_debates2012[["dialogue"]]))
})
}), wordlist), recursive=FALSE)
fdl <- qdap:::folder(pres_context)
Map(function(x, y){
if (identical(integer(0), x)) return(NULL)
z <- with(pres_debates2012, trans_context(dialogue, person, inds=x, n.before=1))
z[["text"]] <- gsub(beg2char(y, "."),
paste0("[[", beg2char(y, "."), "]]"), z[["text"]])
print(z, file=file.path(fdl, sprintf("\%s.doc", y)))
}, locs, names(locs))
}
}
\keyword{position}
\keyword{word}
|
globalVariables(c("str", ".", "str_is_run"))
image_entry <- function(src, width, height){
  # One row per image; keep paths as character (no factor coercion).
  entries <- data.frame(
    image_src = src,
    width = width,
    height = height,
    stringsAsFactors = FALSE
  )
  class(entries) <- c("image_entry", class(entries))
  entries
}
format.image_entry <- function(x, type = "console", ...){
  # `type` must be a single, known output format.
  stopifnot(length(type) == 1)
  stopifnot(type %in% c("wml", "pml", "html", "console"))
  if (type == "pml") {
    # PowerPoint markup: image runs are produced elsewhere, emit blanks.
    res <- rep("", nrow(x))
  } else if (type == "console") {
    # Console preview: one placeholder string per row.
    res <- rep("{image_entry:{...}}", nrow(x))
  } else {
    # wml/html: delegate each row to external_img() and format that.
    res <- mapply(function(image_src, width, height){
      format(external_img(src = image_src, width = width, height = height), type = type)
    }, x$image_src, x$width, x$height, SIMPLIFY = FALSE)
    res <- setNames(unlist(res), NULL)
  }
  res
}
drop_column <- function(x, cols){
  # Keep every column whose name is not listed in `cols`; drop = FALSE
  # preserves the data.frame even when a single column remains.
  keep <- !(colnames(x) %in% cols)
  x[, keep, drop = FALSE]
}
as_grp_index <- function(x){
  # Fixed-width, zero-padded group label so that lexicographic order
  # matches numeric order of the group numbers.
  sprintf("gp_%09.0f", x)
}
group_index <- function(x, by, varname = "grp"){
  # Sort the rows on the grouping columns, number each distinct key in
  # sort order, then map the group numbers back to the original row
  # order via the remembered row ids.
  sort_order <- do.call(order, x[by])
  x$ids_ <- seq_along(sort_order)
  x <- x[sort_order, , drop = FALSE]
  group_no <- cumsum(!duplicated(x[, by]))
  group_no <- group_no[order(x$ids_)]
  as_grp_index(group_no)
}
group_ref <- function(x, by, varname = "grp"){
  # One row per distinct grouping key, in sort order, tagged with the
  # same gp_* label that group_index() assigns to that key.
  sort_order <- do.call(order, x[by])
  x$ids_ <- seq_along(sort_order)
  x <- x[sort_order, , drop = FALSE]
  ref <- x[!duplicated(x[, by]), by]
  ref$index_ <- as_grp_index(seq_len(nrow(ref)))
  row.names(ref) <- NULL
  ref
}
drop_useless_blank <- function( x ){
  # Within each (col_key, idrow) group keep the rows that carry real
  # content (non-empty text or an image entry); when none do, keep the
  # first row so the cell is still represented.
  grp <- group_index(x, by = c("col_key", "idrow"))
  pieces <- split(x, grp)
  pieces <- lapply(pieces, function(chunk){
    has_content <- which(!chunk$str %in% c("", NA) | chunk$type_out %in% "image_entry")
    if (length(has_content)) chunk[has_content, ] else chunk[1, ]
  })
  do.call(rbind, pieces)
}
get_i_from_formula <- function( f, data ){
  # Only one-sided formulas are accepted: ~ condition.
  if (length(f) > 2)
    stop("formula selection is not as expected ( ~ condition )", call. = FALSE)
  # Evaluate the RHS with the data columns in scope.
  selection <- eval(as.call(f[[2]]), envir = data)
  if (!is.logical(selection))
    stop("formula selection should return a logical vector", call. = FALSE)
  selection
}
get_j_from_formula <- function( f, data ){
  # Only one-sided formulas are accepted: ~ var1 + var2 + ...
  if (length(f) > 2)
    stop("formula selection is not as expected ( ~ variables )", call. = FALSE)
  selected <- attr(terms(f), "term.labels")
  known <- names(data)
  unknown <- !selected %in% known
  if (any(unknown)) {
    bad <- paste0("[", selected[unknown], "]", collapse = ", ")
    stop("unknown variables:", bad, call. = FALSE)
  }
  selected
}
#' Validate a row selector against a flextable part.
#'
#' Row selection by formula is evaluated against the part's dataset;
#' header and footer parts do not expose those columns, so a formula
#' selector is rejected for them. The two original branches were
#' byte-identical except for the part name, so they are merged here;
#' the misspelled "adress" in the user-facing message is also fixed.
#'
#' @param i row selector; only checked when it is a formula.
#' @param part character part name(s), e.g. "body", "header", "footer".
#' @return TRUE invisibly-compatible scalar (TRUE) when the combination
#'   is allowed; otherwise an error is raised.
check_formula_i_and_part <- function(i, part){
  if( inherits(i, "formula") && any(c("header", "footer") %in% part) ){
    stop("formula in argument i cannot address part '", part, "'.", call. = FALSE)
  }
  TRUE
}
nrow_part <- function(x, part){
  # Row count of a part's dataset; 0 when the part or its dataset is
  # absent. NULL$dataset is NULL in R, so a single NULL test covers
  # both missing-part and missing-dataset cases.
  part_data <- x[[part]]$dataset
  if (is.null(part_data)) 0 else nrow(part_data)
}
# Register `url` as an external hyperlink relationship on `rel` (an
# officer-style relationships object with get_next_id()/add() methods)
# and rewrite the XML in `str` so that the r:id attribute of `pattern`
# elements refers to the new relationship id instead of the raw URL.
# Returns the rewritten XML string.
process_url <- function(rel, url, str, pattern, double_esc = TRUE){
# Next free relationship id, formatted as "rId<n>".
new_rid <- sprintf("rId%.0f", rel$get_next_id())
if(double_esc)
escape <- function(x) htmlEscape(htmlEscape(x))
else escape <- function(x) htmlEscape(x)# it seems that word does not behave as powerpoint
# Record the hyperlink relationship; target_mode "External" marks it
# as pointing outside the package.
rel$add(
id = new_rid, type = "http://schemas.openxmlformats.org/officeDocument/2006/relationships/hyperlink",
target = escape(url), target_mode = "External" )
# Swap the literal URL for the relationship id. fixed() prevents any
# regex interpretation of characters inside the URL.
str_replace_all( string = str,
fixed( sprintf("<%s r:id=\"%s\"", pattern, url ) ),
sprintf("<%s r:id=\"%s\"", pattern, new_rid )
)
}
# Build the default display structure for the columns named in
# `col_keys`: one formatter per column, chosen from the column's type.
# The per-type format strings (fmt_double, fmt_integer, ...) are taken
# from the default arguments of set_formatter_type(), so both code
# paths stay in sync. Each formatter is expressed as a one-sided-style
# formula string "varname ~ <expr>" with the column name backtick-quoted
# to survive non-syntactic names.
create_display <- function(data, col_keys){
set_formatter_type_formals <- formals(set_formatter_type)
formatters <- mapply(function(x, varname){
if( is.double(x) ) paste0(varname, " ~ sprintf(", shQuote(set_formatter_type_formals$fmt_double), ", `", varname ,"`)")
else if( is.integer(x) ) paste0(varname, " ~ sprintf(", shQuote(set_formatter_type_formals$fmt_integer), ", `", varname ,"`)")
else if( is.factor(x) ) paste0(varname, " ~ as.character(`", varname ,"`)")
else if( is.character(x) ) paste0(varname, " ~ as.character(`", varname ,"`)")
else if( is.logical(x) ) paste0(varname, " ~ as.character(`", varname ,"`)")
else if( inherits(x, "Date") ) paste0(varname, " ~ format(`", varname ,"`, ", shQuote(set_formatter_type_formals$fmt_date), ")")
else if( inherits(x, "POSIXt") ) paste0(varname, " ~ format(`", varname ,"`, ", shQuote(set_formatter_type_formals$fmt_datetime), ")")
else paste0(varname, " ~ ", set_formatter_type_formals$fun_any, "(`", varname ,"`)")
}, data[col_keys], col_keys, SIMPLIFY = FALSE)
# Wrap each formula string into a display_parser bound to the "{{var}}"
# placeholder for that column.
formatters <- mapply(function(f, varname){
display_parser$new(x = paste0("{{", varname, "}}"),
formatters = list( as.formula( f ) ),
fprops = list() )
}, formatters, col_keys )
# One display row per data row, with the parsers attached per column.
display_structure$new(nrow(data), col_keys, formatters )
}
# S3 generic used when rendering cell values as display text: missing
# values become `na_string`, everything else is formatted per type.
format_fun <- function(x, na_string = "", ...) {
  UseMethod("format_fun")
}

# Character values pass through unchanged.
format_fun.character <- function(x, na_string = "", ...) {
  ifelse(is.na(x), na_string, x)
}

# Factors are rendered via their labels.
format_fun.factor <- function(x, na_string = "", ...) {
  ifelse(is.na(x), na_string, as.character(x))
}

# Logicals map to the `true`/`false` display strings.
format_fun.logical <- function(x, na_string = "", true = "true", false = "false") {
  ifelse(is.na(x), na_string, ifelse(x, true, false))
}

# Integers and doubles are sprintf()-formatted with the caller-supplied
# format strings.
format_fun.integer <- function(x, na_string = "", fmt_integer, ...) {
  ifelse(is.na(x), na_string, sprintf(fmt_integer, x))
}

format_fun.double <- function(x, na_string = "", fmt_double, ...) {
  ifelse(is.na(x), na_string, sprintf(fmt_double, x))
}

# Dates and date-times use format() with the supplied format string.
format_fun.Date <- function(x, na_string = "", fmt_date, ...) {
  ifelse(is.na(x), na_string, format(x, fmt_date))
}

format_fun.POSIXt <- function(x, na_string = "", fmt_datetime, ...) {
  ifelse(is.na(x), na_string, format(x, fmt_datetime))
}

# Fallback: whatever base format() produces.
format_fun.default <- function(x, na_string = "", ...) {
  ifelse(is.na(x), na_string, format(x))
}
| /R/utils.R | no_license | ThiAmm/flextable | R | false | false | 5,931 | r |
globalVariables(c("str", ".", "str_is_run"))
image_entry <- function(src, width, height){
x <- data.frame(image_src = src, width = width, height = height, stringsAsFactors = FALSE)
class(x) <- c( "image_entry", class(x) )
x
}
format.image_entry = function (x, type = "console", ...){
stopifnot( length(type) == 1)
stopifnot( type %in% c("wml", "pml", "html", "console") )
if( type == "pml" ){
out <- rep("", nrow(x))
} else if( type == "console" ){
out <- rep("{image_entry:{...}}", nrow(x))
} else {
out <- mapply( function(image_src, width, height){
format( external_img(src = image_src, width = width, height = height), type = type )
}, x$image_src, x$width, x$height, SIMPLIFY = FALSE)
out <- setNames(unlist(out), NULL)
}
out
}
drop_column <- function(x, cols){
x[, !(colnames(x) %in% cols), drop = FALSE]
}
as_grp_index <- function(x){
sprintf( "gp_%09.0f", x )
}
group_index <- function(x, by, varname = "grp"){
order_ <- do.call( order, x[ by ] )
x$ids_ <- seq_along(order_)
x <- x[order_, ,drop = FALSE]
gprs <- cumsum(!duplicated(x[, by ]) )
gprs <- gprs[order(x$ids_)]
as_grp_index(gprs)
}
group_ref <- function(x, by, varname = "grp"){
order_ <- do.call( order, x[ by ] )
x$ids_ <- seq_along(order_)
x <- x[order_, ,drop = FALSE]
ref <- x[!duplicated(x[, by ]), by]
ref$index_ <- as_grp_index( seq_len( nrow(ref) ) )
row.names(ref) <- NULL
ref
}
drop_useless_blank <- function( x ){
grp <- group_index(x, by = c("col_key", "idrow") )
x <- split( x, grp)
x <- lapply( x, function(x){
non_empty <- which( !x$str %in% c("", NA) | x$type_out %in% "image_entry" )
if(length(non_empty)) x[non_empty,]
else x[1,]
})
do.call(rbind, x)
}
get_i_from_formula <- function( f, data ){
if( length(f) > 2 )
stop("formula selection is not as expected ( ~ condition )", call. = FALSE)
i <- eval(as.call(f[[2]]), envir = data)
if( !is.logical(i) )
stop("formula selection should return a logical vector", call. = FALSE)
i
}
get_j_from_formula <- function( f, data ){
if( length(f) > 2 )
stop("formula selection is not as expected ( ~ variables )", call. = FALSE)
j <- attr(terms(f), "term.labels")
names_ <- names(data)
if( any( invalid_names <- (!j %in% names_) ) ){
invalid_names <- paste0("[", j[invalid_names], "]", collapse = ", ")
stop("unknown variables:", invalid_names, call. = FALSE)
}
j
}
check_formula_i_and_part <- function(i, part){
if( inherits(i, "formula") && "header" %in% part ){
stop("formula in argument i cannot adress part '", part, "'.", call. = FALSE)
} else if( inherits(i, "formula") && "footer" %in% part ){
stop("formula in argument i cannot adress part '", part, "'.", call. = FALSE)
}
TRUE
}
nrow_part <- function(x, part){
if( is.null(x[[part]]) )
0
else if( is.null(x[[part]]$dataset) )
0
else nrow(x[[part]]$dataset)
}
process_url <- function(rel, url, str, pattern, double_esc = TRUE){
new_rid <- sprintf("rId%.0f", rel$get_next_id())
if(double_esc)
escape <- function(x) htmlEscape(htmlEscape(x))
else escape <- function(x) htmlEscape(x)# it seems that word does not behave as powerpoint
rel$add(
id = new_rid, type = "http://schemas.openxmlformats.org/officeDocument/2006/relationships/hyperlink",
target = escape(url), target_mode = "External" )
str_replace_all( string = str,
fixed( sprintf("<%s r:id=\"%s\"", pattern, url ) ),
sprintf("<%s r:id=\"%s\"", pattern, new_rid )
)
}
create_display <- function(data, col_keys){
set_formatter_type_formals <- formals(set_formatter_type)
formatters <- mapply(function(x, varname){
if( is.double(x) ) paste0(varname, " ~ sprintf(", shQuote(set_formatter_type_formals$fmt_double), ", `", varname ,"`)")
else if( is.integer(x) ) paste0(varname, " ~ sprintf(", shQuote(set_formatter_type_formals$fmt_integer), ", `", varname ,"`)")
else if( is.factor(x) ) paste0(varname, " ~ as.character(`", varname ,"`)")
else if( is.character(x) ) paste0(varname, " ~ as.character(`", varname ,"`)")
else if( is.logical(x) ) paste0(varname, " ~ as.character(`", varname ,"`)")
else if( inherits(x, "Date") ) paste0(varname, " ~ format(`", varname ,"`, ", shQuote(set_formatter_type_formals$fmt_date), ")")
else if( inherits(x, "POSIXt") ) paste0(varname, " ~ format(`", varname ,"`, ", shQuote(set_formatter_type_formals$fmt_datetime), ")")
else paste0(varname, " ~ ", set_formatter_type_formals$fun_any, "(`", varname ,"`)")
}, data[col_keys], col_keys, SIMPLIFY = FALSE)
formatters <- mapply(function(f, varname){
display_parser$new(x = paste0("{{", varname, "}}"),
formatters = list( as.formula( f ) ),
fprops = list() )
}, formatters, col_keys )
display_structure$new(nrow(data), col_keys, formatters )
}
format_fun <- function( x, na_string = "", ... ){
UseMethod("format_fun")
}
format_fun.default <- function( x, na_string = "", ... ){
ifelse( is.na(x), na_string, format(x) )
}
format_fun.character <- function( x, na_string = "", ... ){
ifelse( is.na(x), na_string, x )
}
format_fun.factor <- function( x, na_string = "", ... ){
ifelse( is.na(x), na_string, as.character(x) )
}
format_fun.logical <- function( x, na_string = "", true = "true", false = "false" ){
ifelse( is.na(x), na_string, ifelse(x, true, false) )
}
format_fun.double <- function( x, na_string = "", fmt_double, ... ){
ifelse( is.na(x), na_string, sprintf(fmt_double, x) )
}
format_fun.integer <- function( x, na_string = "", fmt_integer, ... ){
ifelse( is.na(x), na_string, sprintf(fmt_integer, x) )
}
format_fun.Date <- function( x, na_string = "", fmt_date, ... ){
ifelse( is.na(x), na_string, format(x, fmt_date) )
}
format_fun.POSIXt <- function( x, na_string = "", fmt_datetime, ... ){
ifelse( is.na(x), na_string, format(x, fmt_datetime) )
}
|
# Unroot a phylogenetic tree: downstream tools (e.g. codeml) expect
# unrooted input.
library(ape)
# Read the Newick-format tree for this orthogroup.
testtree <- read.tree("4172_0.txt")
# Collapse the root node, producing a basal multifurcation.
unrooted_tr <- unroot(testtree)
# Write the unrooted tree back out in Newick format.
write.tree(unrooted_tr, file="4172_0_unrooted.txt")
testtree <- read.tree("4172_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="4172_0_unrooted.txt") |
# Trinity-derived RNA-seq sample-correlation script: loads a Salmon
# read-count matrix, converts to log2(CPM+1), and plots a Pearson
# sample-correlation heatmap. Paths are hard-coded for the original
# analysis machine.
library(cluster)
library(Biobase)
library(qvalue)
library(fastcluster)
options(stringsAsFactors = FALSE)
# NOTE(review): F is FALSE here; TRUE/FALSE would be safer since T/F are reassignable.
NO_REUSE = F
# try to reuse earlier-loaded data if possible
if (file.exists("Pocillopora_acuta_PredGenes_experimental_v1.transcripts.cds.fna.salmon.allSamples.numreads.matrix.RData") && ! NO_REUSE) {
print('RESTORING DATA FROM EARLIER ANALYSIS')
load("Pocillopora_acuta_PredGenes_experimental_v1.transcripts.cds.fna.salmon.allSamples.numreads.matrix.RData")
} else {
print('Reading matrix file.')
primary_data = read.table("Pocillopora_acuta_PredGenes_experimental_v1.transcripts.cds.fna.salmon.allSamples.numreads.matrix", header=T, com='', row.names=1, check.names=F, sep='\t')
primary_data = as.matrix(primary_data)
}
# Helper plotting functions shipped with Trinity (absolute paths).
source("/home/timothy/programs/trinityrnaseq-v2.11.0/Analysis/DifferentialExpression/R/heatmap.3.R")
source("/home/timothy/programs/trinityrnaseq-v2.11.0/Analysis/DifferentialExpression/R/misc_rnaseq_funcs.R")
source("/home/timothy/programs/trinityrnaseq-v2.11.0/Analysis/DifferentialExpression/R/pairs3.R")
source("/home/timothy/programs/trinityrnaseq-v2.11.0/Analysis/DifferentialExpression/R/vioplot2.R")
data = primary_data
myheatcol = colorpanel(75, 'purple','black','yellow')
# Sample sheet: column 1 = sample/condition name, column 2 = replicate name.
samples_data = read.table("samples_Pacuta.txt", header=F, check.names=F, fill=T)
samples_data = samples_data[samples_data[,2] != '',]
colnames(samples_data) = c('sample_name', 'replicate_name')
sample_types = as.character(unique(samples_data[,1]))
rep_names = as.character(samples_data[,2])
# Keep only matrix columns that appear in the sample sheet.
data = data[, colnames(data) %in% rep_names, drop=F ]
nsamples = length(sample_types)
sample_colors = rainbow(nsamples)
names(sample_colors) = sample_types
# Map each sample type to its replicate column names.
sample_type_list = list()
for (i in 1:nsamples) {
samples_want = samples_data[samples_data[,1]==sample_types[i], 2]
sample_type_list[[sample_types[i]]] = as.vector(samples_want)
}
# Relabel each column with its sample type.
sample_factoring = colnames(data)
for (i in 1:nsamples) {
sample_type = sample_types[i]
replicates_want = sample_type_list[[sample_type]]
sample_factoring[ colnames(data) %in% replicates_want ] = sample_type
}
# Filter low-count genes, then CPM-normalise and log2-transform.
data = data[rowSums(data)>=10,]
initial_matrix = data # store before doing various data transformations
cs = colSums(data)
data = t( t(data)/cs) * 1e6;
data = log2(data+1)
sample_factoring = colnames(data)
for (i in 1:nsamples) {
sample_type = sample_types[i]
replicates_want = sample_type_list[[sample_type]]
sample_factoring[ colnames(data) %in% replicates_want ] = sample_type
}
# Logical membership matrix (sample type x column), then colour-coded
# annotation bars for the heatmap margins.
sampleAnnotations = matrix(ncol=ncol(data),nrow=nsamples)
for (i in 1:nsamples) {
sampleAnnotations[i,] = colnames(data) %in% sample_type_list[[sample_types[i]]]
}
sampleAnnotations = apply(sampleAnnotations, 1:2, function(x) as.logical(x))
sampleAnnotations = sample_matrix_to_color_assignments(sampleAnnotations, col=sample_colors)
rownames(sampleAnnotations) = as.vector(sample_types)
colnames(sampleAnnotations) = colnames(data)
data = as.matrix(data) # convert to matrix
# Persist the filtered, normalised matrix.
write.table(data, file="Pocillopora_acuta_PredGenes_experimental_v1.transcripts.cds.fna.salmon.allSamples.numreads.matrix.minRow10.CPM.log2.dat", quote=F, sep=' ');
if (nrow(data) < 2) { stop("
**** Sorry, at least two rows are required for this matrix.
");}
if (ncol(data) < 2) { stop("
**** Sorry, at least two columns are required for this matrix.
");}
# Pairwise Pearson correlation between samples, then complete-linkage
# clustering on Euclidean distance for the dendrogram.
sample_cor = cor(data, method='pearson', use='pairwise.complete.obs')
write.table(sample_cor, file="Pocillopora_acuta_PredGenes_experimental_v1.transcripts.cds.fna.salmon.allSamples.numreads.matrix.minRow10.CPM.log2.sample_cor.dat", quote=F, sep=' ')
sample_dist = dist(t(data), method='euclidean')
hc_samples = hclust(sample_dist, method='complete')
pdf("Pocillopora_acuta_PredGenes_experimental_v1.transcripts.cds.fna.salmon.allSamples.numreads.matrix.minRow10.CPM.log2.sample_cor_matrix.pdf")
sample_cor_for_plot = sample_cor
if (is.null(hc_samples)) { RowV=NULL; ColV=NULL} else { RowV=as.dendrogram(hc_samples); ColV=RowV }
heatmap.3(sample_cor_for_plot, dendrogram='both', Rowv=RowV, Colv=ColV, col = myheatcol, scale='none', symm=TRUE, key=TRUE,density.info='none', trace='none', symkey=FALSE, symbreaks=F, margins=c(10,10), cexCol=1, cexRow=1, cex.main=0.75, main=paste("sample correlation matrix
", "Pocillopora_acuta_PredGenes_experimental_v1.transcripts.cds.fna.salmon.allSamples.numreads.matrix.minRow10.CPM.log2") , ColSideColors=sampleAnnotations, RowSideColors=t(sampleAnnotations))
dev.off()
gene_cor = NULL
| /Genotype_Analysis/Pacuta/Read_Mapping/PredGenes_Pacuta_experimental_CDS_RNAseq_Salmon/gene_correlation/Pocillopora_acuta_PredGenes_experimental_v1.transcripts.cds.fna.salmon.allSamples.numreads.matrix.Step2.R | no_license | hputnam/Coral_Stress_Phenome | R | false | false | 4,442 | r | library(cluster)
library(Biobase)
library(qvalue)
library(fastcluster)
options(stringsAsFactors = FALSE)
NO_REUSE = F
# try to reuse earlier-loaded data if possible
if (file.exists("Pocillopora_acuta_PredGenes_experimental_v1.transcripts.cds.fna.salmon.allSamples.numreads.matrix.RData") && ! NO_REUSE) {
print('RESTORING DATA FROM EARLIER ANALYSIS')
load("Pocillopora_acuta_PredGenes_experimental_v1.transcripts.cds.fna.salmon.allSamples.numreads.matrix.RData")
} else {
print('Reading matrix file.')
primary_data = read.table("Pocillopora_acuta_PredGenes_experimental_v1.transcripts.cds.fna.salmon.allSamples.numreads.matrix", header=T, com='', row.names=1, check.names=F, sep='\t')
primary_data = as.matrix(primary_data)
}
source("/home/timothy/programs/trinityrnaseq-v2.11.0/Analysis/DifferentialExpression/R/heatmap.3.R")
source("/home/timothy/programs/trinityrnaseq-v2.11.0/Analysis/DifferentialExpression/R/misc_rnaseq_funcs.R")
source("/home/timothy/programs/trinityrnaseq-v2.11.0/Analysis/DifferentialExpression/R/pairs3.R")
source("/home/timothy/programs/trinityrnaseq-v2.11.0/Analysis/DifferentialExpression/R/vioplot2.R")
data = primary_data
myheatcol = colorpanel(75, 'purple','black','yellow')
samples_data = read.table("samples_Pacuta.txt", header=F, check.names=F, fill=T)
samples_data = samples_data[samples_data[,2] != '',]
colnames(samples_data) = c('sample_name', 'replicate_name')
sample_types = as.character(unique(samples_data[,1]))
rep_names = as.character(samples_data[,2])
data = data[, colnames(data) %in% rep_names, drop=F ]
nsamples = length(sample_types)
sample_colors = rainbow(nsamples)
names(sample_colors) = sample_types
sample_type_list = list()
for (i in 1:nsamples) {
samples_want = samples_data[samples_data[,1]==sample_types[i], 2]
sample_type_list[[sample_types[i]]] = as.vector(samples_want)
}
sample_factoring = colnames(data)
for (i in 1:nsamples) {
sample_type = sample_types[i]
replicates_want = sample_type_list[[sample_type]]
sample_factoring[ colnames(data) %in% replicates_want ] = sample_type
}
data = data[rowSums(data)>=10,]
initial_matrix = data # store before doing various data transformations
cs = colSums(data)
data = t( t(data)/cs) * 1e6;
data = log2(data+1)
sample_factoring = colnames(data)
for (i in 1:nsamples) {
sample_type = sample_types[i]
replicates_want = sample_type_list[[sample_type]]
sample_factoring[ colnames(data) %in% replicates_want ] = sample_type
}
sampleAnnotations = matrix(ncol=ncol(data),nrow=nsamples)
for (i in 1:nsamples) {
sampleAnnotations[i,] = colnames(data) %in% sample_type_list[[sample_types[i]]]
}
sampleAnnotations = apply(sampleAnnotations, 1:2, function(x) as.logical(x))
sampleAnnotations = sample_matrix_to_color_assignments(sampleAnnotations, col=sample_colors)
rownames(sampleAnnotations) = as.vector(sample_types)
colnames(sampleAnnotations) = colnames(data)
data = as.matrix(data) # convert to matrix
write.table(data, file="Pocillopora_acuta_PredGenes_experimental_v1.transcripts.cds.fna.salmon.allSamples.numreads.matrix.minRow10.CPM.log2.dat", quote=F, sep=' ');
if (nrow(data) < 2) { stop("
**** Sorry, at least two rows are required for this matrix.
");}
if (ncol(data) < 2) { stop("
**** Sorry, at least two columns are required for this matrix.
");}
sample_cor = cor(data, method='pearson', use='pairwise.complete.obs')
write.table(sample_cor, file="Pocillopora_acuta_PredGenes_experimental_v1.transcripts.cds.fna.salmon.allSamples.numreads.matrix.minRow10.CPM.log2.sample_cor.dat", quote=F, sep=' ')
sample_dist = dist(t(data), method='euclidean')
hc_samples = hclust(sample_dist, method='complete')
pdf("Pocillopora_acuta_PredGenes_experimental_v1.transcripts.cds.fna.salmon.allSamples.numreads.matrix.minRow10.CPM.log2.sample_cor_matrix.pdf")
sample_cor_for_plot = sample_cor
if (is.null(hc_samples)) { RowV=NULL; ColV=NULL} else { RowV=as.dendrogram(hc_samples); ColV=RowV }
heatmap.3(sample_cor_for_plot, dendrogram='both', Rowv=RowV, Colv=ColV, col = myheatcol, scale='none', symm=TRUE, key=TRUE,density.info='none', trace='none', symkey=FALSE, symbreaks=F, margins=c(10,10), cexCol=1, cexRow=1, cex.main=0.75, main=paste("sample correlation matrix
", "Pocillopora_acuta_PredGenes_experimental_v1.transcripts.cds.fna.salmon.allSamples.numreads.matrix.minRow10.CPM.log2") , ColSideColors=sampleAnnotations, RowSideColors=t(sampleAnnotations))
dev.off()
gene_cor = NULL
|
###########################################################################
## The use of the term ‘limnology’ and its scientometric consequences ####
## Diego Fontaneto, Alejandro Martínez, Stefano Mammola, Aldo Marchetto ###
###########################################################################
## R code to prepare the data:
# Working directory -------------------------------------------------------
setwd("") # <- change me :)
# Loading R packages ------------------------------------------------------
library("bazar")
library("dplyr")
library("tidyr")
library("ggplot2")
library("gridExtra")
library("gtable")
library("grid")
library("jaod")
library("quanteda")
# Functions ---------------------------------------------------------------
## Function for checking if a number is a whole number
is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) abs(x - round(x)) < tol
## Function for calculating the standard error
std <- function(x) sd(x)/sqrt(length(x))
## Function for replacing nan with na
## is.nan() method for data frames: apply is.nan() to each column and
## bind the results back into a logical matrix, so is.nan(df) works
## analogously to is.na(df).
is.nan.data.frame <- function(x) {
  do.call(cbind, lapply(x, is.nan))
}
## Function for calculating the Flesch reading ease
## Flesch Reading Ease score:
##   206.835 - 1.015 * (words/sentences) - 84.6 * (syllables/words).
## Higher values indicate easier-to-read text.
FRE_index <- function(wordCount, sentenceCount, syllableCount) {
  words_per_sentence <- wordCount / sentenceCount
  syllables_per_word <- syllableCount / wordCount
  206.835 - (1.015 * words_per_sentence) - (84.6 * syllables_per_word)
}
## Function for cleaning abstracts
## Normalise a text string (or vector of words) prior to counting:
## lower-cases everything, optionally strips punctuation and digits,
## and trims surrounding whitespace.
##
## Arguments:
##   word.list          character string/vector to clean (coerced with a
##                      warning if not character).
##   remove.punctuation drop punctuation except sentence-level . ! ?,
##                      protecting dots inside abbreviations and URLs.
##   remove.numbers     drop all digit runs.
##   split              split the (first element of) word.list into
##                      tokens on `split.sep` before cleaning.
word.cleaner <- function(word.list, remove.punctuation=FALSE, remove.numbers=FALSE, split=FALSE, split.sep= " ") {
  # check if right data is provided; is.character() is robust for
  # objects with more than one class, where class(x) != "character"
  # would be a vectorized comparison (an error in an if() under R >= 4.2)
  if (!is.character(word.list)){
    word.list <- as.character(word.list)
    warning("The input data has been converted to a character string")
  }
  if(split)
    word.list <- strsplit(word.list, split.sep)[[1]]
  # all lower case
  word.list <- tolower(as.vector(word.list))
  if(remove.punctuation){
    # remove all punctuation except . , !, and ?
    word.list <- gsub("[^[:alnum:][:space:](?|!|.)]", "\\1", word.list)
    # Replace each dot that is in between word characters
    word.list <- gsub("\\b\\.\\b", "\\1", word.list, perl=TRUE)
    # Replace each dot that is in between letters
    word.list <- gsub("(?<=\\p{L})\\.(?=\\p{L})", "\\1", word.list, perl=TRUE)
    # Replace each dot that is in between word characters, but not in URLs
    word.list <- gsub("(?:ht|f)tps?://\\S*(*SKIP)(*F)|\\b\\.\\b", "\\1", word.list, perl=TRUE)
    # remove "et al."
    word.list <- gsub("et al.", "et al", word.list)
    # remove "Sic!"
    word.list <- gsub("sic!", "sic", word.list)
  }
  if(remove.numbers)
    word.list <- gsub('[0-9]+', '', word.list)
  # remove extra white spaces
  word.list <- trimws(word.list)
  return(word.list)
}
#Example...
# Demo of word.cleaner(): text is lower-cased, digits and most punctuation
# are stripped ("?" and "!" are kept; dots between word characters removed,
# with an attempt to protect ht/ftp URLs).
test <- "How Are you? Fine, thanks! The paper by Ripple et al.2017 is great. you can download it at www.bioscience.com (Sic!), and enjoy it. v.e.r.y c.o.o.o.l thanks."
word.cleaner(test, remove.punctuation = TRUE, remove.numbers= TRUE, split = FALSE)
## function for counting words, syllabes, and sentences
## Count words, syllables and sentences in a (cleaned) text string.
## Relies on quanteda::tokens() / quanteda::nsyllable() for tokenisation.
##
## word.list : character scalar (coerced with a warning otherwise)
## Returns list(wordCount, syllableCount, sentenceCount).
##
## Fixes vs. the original: tokens() was computed twice (hoisted to `toks`),
## and the class() string comparison is replaced by is.character().
abstract.counter <- function(word.list) {
  if (!is.character(word.list)) {
    word.list <- as.character(word.list)
    warning("The input data has been converted to a character string")
  }
  # Tokenise once, reuse for both word and syllable counting.
  toks <- tokens(word.list, remove_punct = TRUE)
  wordCount <- length(unlist(strsplit(as.character(toks), " ")))
  # NOTE(review): `$text1` assumes a single input document -- confirm that
  # callers never pass a multi-document vector.
  syllableCount <- sum(na.omit(quanteda::nsyllable(toks)$text1))
  # Naive sentence count: split on literal dots.
  sentenceCount <- length(unlist(strsplit(word.list, "[.]")))
  output <- list(wordCount = wordCount,
                 syllableCount = syllableCount,
                 sentenceCount = sentenceCount)
  return(output)
}
#Example...
# End-to-end demo: clean a real abstract, count words/syllables/sentences,
# then compute its Flesch reading ease score.
warning_insect <- "Here we build on the manifesto 'World Scientists' Warning to Humanity, issued by the Alliance of World Scientists. As a group of conservation biologists deeply concerned about the decline of insect populations, we here review what we know about the drivers of insect extinctions, their consequences, and how extinctions can negatively impact humanity. We are causing insect extinctions by driving habitat loss, degradation, and fragmentation, use of polluting and harmful substances, the spread of invasive species, global climate change, direct overexploitation, and co-extinction of species dependent on other species. With insect extinctions, we lose much more than species. We lose abundance and biomass of insects, diversity across space and time with consequent homogenization, large parts of the tree of life, unique ecological functions and traits, and fundamental parts of extensive networks of biotic interactions. Such losses lead to the decline of key ecosystem services on which humanity depends. From pollination and decomposition, to being resources for new medicines, habitat quality indication and many others, insects provide essential and irreplaceable services. We appeal for urgent action to close key knowledge gaps and curb insect extinctions. An investment in research"
example <- word.cleaner(warning_insect, remove.punctuation = TRUE, remove.numbers= TRUE, split = FALSE)
example <- abstract.counter(example)
FRE_index(wordCount=example$wordCount,
sentenceCount=example$sentenceCount,
syllableCount=example$syllableCount)
# Loading the databases #############################
# Tab-separated Web of Science export, one row per bibliographic record.
db <- read.csv(file = "db_all.txt", sep = '\t', dec = '.', header = TRUE)
str(db)
dim(db)
# remove missing years (likely papers still in press in 2021)
# (dim() before/after each filter shows how many records are dropped)
dim(db)
db <- db %>% drop_na(Publication.Year)
dim(db)
# remove papers in 2021
dim(db)
db <- db[db$Publication.Year<2021,]
dim(db)
# restrict to journal papers only
dim(db)
db <- db[db$Publication.Type=="J",]
dim(db)
#calculating the number of authors and the FRE readability index for each abstract and title
## Compute the FRE score for one raw text field (abstract or title).
## NA input stays NA; otherwise the text is cleaned, counted and scored.
fre_of_text <- function(txt) {
  if (is.na(txt)) {
    return(NA_real_)
  }
  cleaned <- word.cleaner(as.character(txt), remove.punctuation = TRUE,
                          remove.numbers = TRUE, split = FALSE)
  counts <- abstract.counter(cleaned)
  FRE_index(wordCount = counts$wordCount,
            sentenceCount = counts$sentenceCount,
            syllableCount = counts$syllableCount)
}
# Pre-allocate one slot per paper; the original grew these vectors with
# append() inside the loop, which copies the whole vector each time (O(n^2)).
n_rows <- nrow(db)
N_aut <- integer(n_rows)
FRE_abs <- numeric(n_rows)
FRE_tit <- numeric(n_rows)
#run from here:
for (i in seq_len(n_rows)) {
  db_i <- db[i, ]
  # Number of authors: the WoS "Authors" field is ";"-separated.
  N_aut[i] <- length(word.cleaner(word.list = as.character(db_i$Authors),
                                  remove.punctuation = FALSE,
                                  remove.numbers = FALSE,
                                  split = TRUE, split.sep = ";"))
  # Readability of abstract & title (NA-safe, shared helper above).
  FRE_abs[i] <- fre_of_text(db_i$Abstract)
  FRE_tit[i] <- fre_of_text(db_i$Article.Title)
  # Progress report every 100 papers.
  if (i %% 100 == 0) {
    message(paste("Analyzed", as.character(i), "papers out of",
                  as.character(n_rows), sep = " "))
  }
}
#end
# store and save
db2 <- data.frame(db,
                  FRE_abs,
                  FRE_tit,
                  N_aut)
write.csv(db2, "Data_limnology.csv")
###########################################################################
# starting from already prepared data
# db2 <- read.csv(file="Data_limnology.csv", dec='.', header=TRUE, as.is=T)
###########################################################################
# Seeing some trends ------------------------------------------------------
# create datasets for each topic
# Split records by the search keyword used to retrieve them from WoS.
db_LI <- subset(db2,keyword.search=="limnolog")
db_OC <- subset(db2,keyword.search=="oceanograph")
db_LA <- subset(db2,keyword.search=="lake_ecolog")
db_HY <- subset(db2,keyword.search=="hydrobiolog")
# Record counts per topic subset.
dim(db_LI)
dim(db_OC)
dim(db_LA)
dim(db_HY)
# yearly trends for each topic
## Per-year summary of readability, authorship, length and citation metrics
## for one topic subset.  A single helper replaces the four copy-pasted
## summarise() blocks that were here before; output is identical.
## NOTE(review): only FRE_mean_abs drops NAs; the other means and std()
## propagate NA -- kept exactly as before to preserve the published results.
yearly_summary <- function(d) {
  d %>%
    group_by(Publication.Year) %>%
    summarise(FRE_mean_abs = mean(FRE_abs, na.rm = TRUE),
              FRE_se_abs = std(FRE_abs),
              FRE_mean_tit = mean(FRE_tit),
              FRE_se_tit = std(FRE_tit),
              N_aut_mean = mean(N_aut),
              N_aut_se = std(N_aut),
              # Pages: zero page counts are treated as missing metadata.
              N_pages_mean = mean(Number.of.Pages[Number.of.Pages > 0]),
              N_pages_se = std(Number.of.Pages[Number.of.Pages > 0]),
              N_citedref_mean = mean(Cited.Reference.Count),
              N_citedref_se = std(Cited.Reference.Count),
              N_citations_mean = mean(Times.Cited..WoS.Core),
              N_citations_se = std(Times.Cited..WoS.Core),
              N_papers = length(Publication.Year))
}
db_LI_year <- yearly_summary(db_LI)
db_OC_year <- yearly_summary(db_OC)
db_LA_year <- yearly_summary(db_LA)
db_HY_year <- yearly_summary(db_HY)
dim(db_LI_year)
dim(db_OC_year)
dim(db_LA_year)
dim(db_HY_year)
summary(db_LI_year)
summary(db_OC_year)
summary(db_LA_year)
summary(db_HY_year)
##### PLOTS ###############################################################
## The 28 panels drawn below all shared an identical template, so one
## builder replaces the copy-pasted ggplot blocks.  Two defects fixed:
##  * the "Number of pages" panels carried a copy-pasted y-axis label
##    reading "Number of authors";
##  * the Npa_* object names were reused for both the pages and the papers
##    figures, silently shadowing the earlier objects.
## Build one per-topic trend panel.
##  dat   : a per-year summary tibble (db_*_year)
##  ycol  : name (string) of the column plotted on the y axis
##  secol : name of the matching standard-error column, or NULL for a plain
##          line with no error bars / points (readability-abstract and
##          paper-count figures)
##  title : panel title (topic name)
##  ylab  : y-axis label
##  ylims : optional c(lo, hi) passed to ylim()
trend_panel <- function(dat, ycol, secol = NULL, title, ylab, ylims = NULL) {
  p <- ggplot(dat, aes(x = Publication.Year, y = .data[[ycol]])) +
    geom_line(linetype = 1, col = "blue", alpha = 1)
  if (!is.null(secol)) {
    p <- p +
      geom_errorbar(aes(ymin = .data[[ycol]] - .data[[secol]],
                        ymax = .data[[ycol]] + .data[[secol]]),
                    width = .8, col = "blue") +
      geom_point(size = 2, col = "blue")
  }
  p <- p +
    labs(title = title, x = NULL, y = ylab) +
    scale_x_continuous(breaks = seq(from = 1960, to = 2020, by = 10),
                       labels = c("1960", "1970", "1980", "1990",
                                  "2000", "2010", "2020"))
  if (!is.null(ylims)) {
    p <- p + ylim(ylims[1], ylims[2])
  }
  p +
    theme_bw() +
    theme(
      legend.position = "none",
      axis.title = element_text(size = 10),
      axis.text.x = element_text(size = 10),
      axis.text.y = element_text(size = 10),
      panel.grid = element_blank(),
      plot.caption = element_text(size = 10, color = "gray50"),
      plot.title = element_text(face = "bold", size = 12)
    )
}
## The four topic subsets, in the order used throughout the figures.
topic_years <- list(
  "Limnology"    = db_LI_year,
  "Oceanography" = db_OC_year,
  "Lake Ecology" = db_LA_year,
  "Hydrobiology" = db_HY_year
)
## Draw the four topic panels of one metric stacked in a single column.
## (The original also auto-printed each panel individually; only the
## combined 4-panel figure is drawn here.)
trend_figure <- function(ycol, secol, ylab, top, ylims = NULL) {
  grobs <- Map(function(dat, topic) {
    ggplotGrob(trend_panel(dat, ycol, secol, topic, ylab, ylims))
  }, topic_years, names(topic_years))
  grid.arrange(grobs[[1]], grobs[[2]], grobs[[3]], grobs[[4]],
               nrow = 4, top = top)
}
# Readability abstract plot (mean only; SE bars deliberately omitted) -----
trend_figure("FRE_mean_abs", NULL, "Readability [mean]",
             "Readability of Abstract")
# Readability title plot --------------------------------------------------
trend_figure("FRE_mean_tit", "FRE_se_tit", "Readability [mean +/- S.E.]",
             "Readability of Title")
# Number of authors -------------------------------------------------------
trend_figure("N_aut_mean", "N_aut_se", "Number of authors [mean +/- S.E.]",
             "Number of authors", ylims = c(1, 7))
# Number of pages (y label fixed; previously read "Number of authors") ----
trend_figure("N_pages_mean", "N_pages_se", "Number of pages [mean +/- S.E.]",
             "Number of pages", ylims = c(1, 30))
# Number of cited references ----------------------------------------------
trend_figure("N_citedref_mean", "N_citedref_se",
             "Number of cited references [mean +/- S.E.]",
             "Number of cited references", ylims = c(1, 80))
# Number of citations -----------------------------------------------------
trend_figure("N_citations_mean", "N_citations_se",
             "Number of citations [mean +/- S.E.]",
             "Number of citations", ylims = c(1, 80))
# Number of papers --------------------------------------------------------
trend_figure("N_papers", NULL, "Number of papers",
             "Number of papers", ylims = c(1, 2000))
# ANALYSES FOR NUMBER OF PAPERS
# prepare dataset
# Stack the four per-topic yearly summaries and label each row with its topic.
# The rep() lengths match the rbind() order above, so the factor aligns rowwise.
merged <- rbind(db_LI_year, db_OC_year, db_LA_year, db_HY_year)
merged$topic <- as.factor(c(rep('limnology',nrow(db_LI_year)),
rep('oceanography',nrow(db_OC_year)),
rep('lake ecology',nrow(db_LA_year)),
rep('hydrobiology',nrow(db_HY_year))))
summary(merged)
names(merged)
# load libraries
library('car')
library('emmeans')
library('gam')
library('MASS')
library('performance')
# run gam model
# Poisson GAM of yearly paper counts with a per-topic smooth of year.
m_pap <- gam::gam(N_papers ~ topic*s(Publication.Year),
family=poisson, data=merged)
performance::check_model(m_pap)
summary(m_pap)
# Pairwise topic contrasts.
# NOTE(review): passing s(Publication.Year) inside an emmeans specification
# is unusual -- confirm emmeans interprets this as intended for gam objects.
pairs(emmeans::emmeans(m_pap, ~topic*s(Publication.Year)),
simple="topic")
# run nb.glm model
# Negative-binomial GLM as an overdispersion-robust alternative to the Poisson fit.
m_pap2 <- MASS::glm.nb(N_papers ~ topic*Publication.Year, data=merged)
performance::check_model(m_pap2)
car::Anova(m_pap2)
pairs(emmeans::emmeans(m_pap2, ~topic*Publication.Year),
simple="topic")
# plot the results
# Scatter + smoothed trend of yearly paper counts per topic, with the y axis
# on a log2 scale so the early (small-count) years remain visible.
ggplot2::ggplot(merged, aes(x=Publication.Year, y=N_papers)) +
theme_light() +
geom_point(aes(colour=topic)) +
geom_smooth(aes(colour=topic)) +
scale_y_continuous(trans='log2') +
scale_x_continuous(breaks = c(seq(from=1960, to=2020, by=10)),
labels=c("1960","1970","1980","1990","2000","2010","2020")) +
labs(x=NULL, y="Number of papers") +
scale_color_brewer(palette="Paired") | /lib/limnology/limnology review.R | permissive | JNUWWB/metaR | R | false | false | 41,600 | r | ###########################################################################
## The use of the term ‘limnology’ and its scientometrics consequences ####
## Diego Fontaneto, Alejandro Martínez, Stefano Mammola, Aldo Marchetto ###
###########################################################################
## R code to prepare the data:
# Working directory -------------------------------------------------------
# NOTE(review): setwd() makes the script machine-specific; prefer running
# from the project root (or an RStudio project) so relative paths resolve.
setwd("") # <- change me :)
# Loading R packages ------------------------------------------------------
library("bazar")
library("dplyr")
library("tidyr")
library("ggplot2")
library("gridExtra")
library("gtable")
library("grid")
library("jaod")
library("quanteda")
# Functions ---------------------------------------------------------------
## Test whether each element of a numeric vector is (numerically) a whole
## number, within tolerance `tol` (default: square root of machine epsilon).
## Returns a logical vector the same length as `x`.
is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) {
  abs(x - round(x)) < tol
}
## Standard error of the mean: sd(x) / sqrt(n).
## The default na.rm = FALSE preserves the original behaviour (any NA makes
## the result NA); pass na.rm = TRUE to drop missing values from both the
## standard deviation and the sample size, matching how the yearly means in
## this script are computed with na.rm = TRUE.
std <- function(x, na.rm = FALSE) {
  if (na.rm) {
    x <- x[!is.na(x)]
  }
  sd(x) / sqrt(length(x))
}
## Elementwise is.nan() for a data frame (base is.nan() does not accept data
## frames). Returns a logical matrix with one column per variable, suitable
## for logical subsetting, e.g. df[is.nan.data.frame(df)] <- NA.
is.nan.data.frame <- function(x) {
  nan_by_column <- lapply(x, is.nan)
  do.call(cbind, nan_by_column)
}
## Flesch Reading Ease score:
##   206.835 - 1.015 * (words / sentences) - 84.6 * (syllables / words)
## Higher values indicate easier-to-read text. All three arguments are
## counts for a single document (vectorized inputs also work elementwise).
FRE_index <- function(wordCount, sentenceCount, syllableCount) {
  words_per_sentence <- wordCount / sentenceCount
  syllables_per_word <- syllableCount / wordCount
  206.835 - 1.015 * words_per_sentence - 84.6 * syllables_per_word
}
## Function for cleaning abstracts
## Normalizes a text string (or vector of tokens) for readability counting:
## optional splitting, lowercasing, selective punctuation removal (the order
## of the gsub() calls below is significant), optional digit removal, and
## whitespace trimming. Returns the cleaned character vector.
word.cleaner <- function(word.list, remove.punctuation=FALSE, remove.numbers=FALSE, split=FALSE, split.sep= " ") {
#check if right data is provided
if (class(word.list) != "character"){
word.list <- as.character(word.list)
warning("The input data as been converted as a character string")
}
if(split)
word.list <- strsplit(word.list, split.sep)[[1]]
#all lower case
word.list <- tolower(as.vector(word.list))
if(remove.punctuation){
# remove all punctuation except . , !, and ?
# NOTE(review): the character class keeps ( ? | ! . ) but NOT the comma,
# so commas ARE removed despite the comment above -- confirm intent.
# NOTE(review): the replacement "\\1" refers to a capture group that does
# not exist in these patterns; presumably it acts as an empty replacement
# here -- verify on the target R version before changing.
word.list <- gsub("[^[:alnum:][:space:](?|!|.)]", "\\1", word.list)
# Replace each dot that is in between word characters
word.list <- gsub("\\b\\.\\b", "\\1", word.list, perl=TRUE)
# Replace each dot that is in between letters
word.list <- gsub("(?<=\\p{L})\\.(?=\\p{L})", "\\1", word.list, perl=TRUE)
# Replace each dot that is in between word characters, but no in URLs
# (*SKIP)(*F) is a PCRE verb pair that skips matches inside URLs.
word.list <- gsub("(?:ht|f)tps?://\\S*(*SKIP)(*F)|\\b\\.\\b", "\\1", word.list, perl=TRUE)
#remove "et al."
# Drops the trailing dot of "et al." so it is not counted as a sentence end.
word.list <- gsub("et al.", "et al", word.list)
#remove "Sic!"
# Drops the "!" of "sic!" so it is not counted as a sentence end.
word.list <- gsub("sic!", "sic", word.list)
}
if(remove.numbers)
word.list <- gsub('[0-9]+', '', word.list)
#remove extra white spaces
word.list <- trimws(word.list)
return(word.list)
}
#Example...
# Quick manual check of word.cleaner(): punctuation, digits, a URL, an
# "et al." citation and intra-word dots should all be handled.
test <- "How Are you? Fine, thanks! The paper by Ripple et al.2017 is great. you can download it at www.bioscience.com (Sic!), and enjoy it. v.e.r.y c.o.o.o.l thanks."
word.cleaner(test, remove.punctuation = TRUE, remove.numbers= TRUE, split = FALSE)
## function for counting words, syllabes, and sentences
## Given one (already cleaned) document as a character string, returns a
## named list with wordCount, syllableCount and sentenceCount -- the three
## inputs required by FRE_index().
abstract.counter <- function(word.list) {
if (class(word.list) != "character"){
word.list <- as.character(word.list)
warning("The input data as been converted as a character string")
}
# Words: tokenize without punctuation, then count whitespace-split tokens.
wordCount <- length(unlist(strsplit(as.character(tokens(word.list, remove_punct = TRUE)), " ")))
# Syllables via quanteda::nsyllable(); NAs (unknown words) are dropped.
# NOTE(review): $text1 assumes a single unnamed input document (quanteda's
# default document name) -- confirm if multi-document input is ever passed.
syllableCount <- sum( na.omit(quanteda::nsyllable(tokens(word.list, remove_punct = TRUE))$text1) )
# Sentences: approximated as the number of "."-separated segments.
sentenceCount <- length( unlist(strsplit(word.list, "[.]")) )
output <- list(wordCount,syllableCount,sentenceCount) ; names(output) <- c("wordCount","syllableCount","sentenceCount")
return(output)
}
#Example...
# End-to-end check: clean a real abstract, count its units, and compute its
# Flesch Reading Ease score.
warning_insect <- "Here we build on the manifesto 'World Scientists' Warning to Humanity, issued by the Alliance of World Scientists. As a group of conservation biologists deeply concerned about the decline of insect populations, we here review what we know about the drivers of insect extinctions, their consequences, and how extinctions can negatively impact humanity. We are causing insect extinctions by driving habitat loss, degradation, and fragmentation, use of polluting and harmful substances, the spread of invasive species, global climate change, direct overexploitation, and co-extinction of species dependent on other species. With insect extinctions, we lose much more than species. We lose abundance and biomass of insects, diversity across space and time with consequent homogenization, large parts of the tree of life, unique ecological functions and traits, and fundamental parts of extensive networks of biotic interactions. Such losses lead to the decline of key ecosystem services on which humanity depends. From pollination and decomposition, to being resources for new medicines, habitat quality indication and many others, insects provide essential and irreplaceable services. We appeal for urgent action to close key knowledge gaps and curb insect extinctions. An investment in research"
example <- word.cleaner(warning_insect, remove.punctuation = TRUE, remove.numbers= TRUE, split = FALSE)
example <- abstract.counter(example)
FRE_index(wordCount=example$wordCount,
sentenceCount=example$sentenceCount,
syllableCount=example$syllableCount)
# Loading the databases #############################
# Tab-separated Web of Science export; the repeated dim() calls before and
# after each filter print row counts so the effect of each step is logged.
db <- read.csv(file = "db_all.txt", sep = '\t', dec = '.', header = TRUE)
str(db)
dim(db)
# remove missing years (likely papers still in press in 2021)
dim(db)
db <- db %>% drop_na(Publication.Year)
dim(db)
# remove papers in 2021
# 2021 is incomplete at the time of data collection, so it is excluded.
dim(db)
db <- db[db$Publication.Year<2021,]
dim(db)
# restrict to journal papers only
# Publication.Type "J" = journal article in the WoS coding.
dim(db)
db <- db[db$Publication.Type=="J",]
dim(db)
# Compute, for every paper in db, the number of authors and the Flesch
# Reading Ease (FRE) of its abstract and title.
# The original grew N_aut / FRE_abs / FRE_tit with append() inside the loop
# (quadratic copying); the vectors are now preallocated and filled by index.
n_papers_total <- nrow(db)
N_aut   <- integer(n_papers_total)
FRE_abs <- rep(NA_real_, n_papers_total)
FRE_tit <- rep(NA_real_, n_papers_total)
#run from here:
for (i in seq_len(n_papers_total)) {
  db_i <- db[i, ]
  # Number of authors: the WoS export separates authors with ";".
  N_aut[i] <- length(word.cleaner(word.list = as.character(db_i$Authors),
                                  remove.punctuation = FALSE,
                                  remove.numbers = FALSE,
                                  split = TRUE, split.sep = ";"))
  # Readability of the abstract (left NA when the abstract is missing).
  if (!is.na(db_i$Abstract)) {
    clean_abs <- word.cleaner(as.character(db_i$Abstract),
                              remove.punctuation = TRUE,
                              remove.numbers = TRUE, split = FALSE)
    clean_abs <- abstract.counter(clean_abs)
    FRE_abs[i] <- FRE_index(wordCount = clean_abs$wordCount,
                            sentenceCount = clean_abs$sentenceCount,
                            syllableCount = clean_abs$syllableCount)
  }
  # Readability of the title (left NA when the title is missing).
  if (!is.na(db_i$Article.Title)) {
    clean_tit <- word.cleaner(as.character(db_i$Article.Title),
                              remove.punctuation = TRUE,
                              remove.numbers = TRUE, split = FALSE)
    clean_tit <- abstract.counter(clean_tit)
    FRE_tit[i] <- FRE_index(wordCount = clean_tit$wordCount,
                            sentenceCount = clean_tit$sentenceCount,
                            syllableCount = clean_tit$syllableCount)
  }
  # Progress report every 100 papers (replaces the is.wholenumber(i/100) test).
  if (i %% 100 == 0)
    message(paste("Analyzed", i, "papers out of", n_papers_total, sep = " "))
}
#end
# store and save
db2 <- data.frame(db,
                  FRE_abs,
                  FRE_tit,
                  N_aut)
write.csv(db2, "Data_limnology.csv")
###########################################################################
# starting from already prepared data
# db2 <- read.csv(file="Data_limnology.csv", dec='.', header=TRUE, as.is=T)
###########################################################################
# Seeing some trends ------------------------------------------------------
# create datasets for each topic
# keyword.search holds the truncated search stems used in the WoS query
# (e.g. "limnolog" matches limnology/limnological); dim() logs subset sizes.
db_LI <- subset(db2,keyword.search=="limnolog")
db_OC <- subset(db2,keyword.search=="oceanograph")
db_LA <- subset(db2,keyword.search=="lake_ecolog")
db_HY <- subset(db2,keyword.search=="hydrobiolog")
dim(db_LI)
dim(db_OC)
dim(db_LA)
dim(db_HY)
# yearly trends for each topic
# The four pipelines below were identical except for the input data frame;
# they are factored into one helper so the summary definition lives in one
# place.
#
# Summarise one topic's papers by publication year: means and standard
# errors of readability (abstract/title), author counts, page counts
# (zero-page records excluded), cited references and citations, plus the
# number of papers per year.
# NOTE(review): only the abstract readability mean uses na.rm = TRUE, as in
# the original; titles are presumably never missing -- confirm.
summarise_by_year <- function(dat) {
  dat %>%
    group_by(Publication.Year) %>%
    summarise(FRE_mean_abs = mean(FRE_abs, na.rm = TRUE),
              FRE_se_abs = std(FRE_abs),
              FRE_mean_tit = mean(FRE_tit),
              FRE_se_tit = std(FRE_tit),
              N_aut_mean = mean(N_aut),
              N_aut_se = std(N_aut),
              N_pages_mean = mean(Number.of.Pages[Number.of.Pages > 0]),
              N_pages_se = std(Number.of.Pages[Number.of.Pages > 0]),
              N_citedref_mean = mean(Cited.Reference.Count),
              N_citedref_se = std(Cited.Reference.Count),
              N_citations_mean = mean(Times.Cited..WoS.Core),
              N_citations_se = std(Times.Cited..WoS.Core),
              N_papers = length(Publication.Year))
}
db_LI_year <- summarise_by_year(db_LI)
db_OC_year <- summarise_by_year(db_OC)
db_LA_year <- summarise_by_year(db_LA)
db_HY_year <- summarise_by_year(db_HY)
dim(db_LI_year)
dim(db_OC_year)
dim(db_LA_year)
dim(db_HY_year)
summary(db_LI_year)
summary(db_OC_year)
summary(db_LA_year)
summary(db_HY_year)
##### PLOTS ###############################################################
# Readability abstract plot -----------------------------------------------
# Four copy-pasted panels differing only in data and title, factored into
# one helper.
#
# One panel: yearly mean abstract readability (Flesch Reading Ease) for a
# topic. No error bars (the per-year SEs are noisy for sparse early years).
plot_fre_abstract <- function(dat, panel_title) {
  ggplot(data = dat, aes(x = Publication.Year, y = FRE_mean_abs)) +
    geom_line(aes(x = Publication.Year, y = FRE_mean_abs),
              linetype = 1, col = "blue", alpha = 1) +
    labs(title = panel_title,
         x = NULL,
         y = "Readability [mean]") + # Flesch-Kincaid Readability Ease
    scale_x_continuous(breaks = seq(from = 1960, to = 2020, by = 10),
                       labels = c("1960", "1970", "1980", "1990",
                                  "2000", "2010", "2020")) +
    theme_bw() +
    theme(
      legend.position = "none",
      axis.title = element_text(size = 10),
      axis.text.x = element_text(size = 10),
      axis.text.y = element_text(size = 10),
      panel.grid = element_blank(),
      plot.caption = element_text(size = 10, color = "gray50"),
      plot.title = element_text(face = "bold", size = 12)
    )
}
# Outer parentheses print each plot as it is assigned (original behaviour).
(FRE_a_LI <- plot_fre_abstract(db_LI_year, "Limnology"))
(FRE_a_OC <- plot_fre_abstract(db_OC_year, "Oceanography"))
(FRE_a_LA <- plot_fre_abstract(db_LA_year, "Lake Ecology"))
(FRE_a_HY <- plot_fre_abstract(db_HY_year, "Hydrobiology"))
# four plots together
FRE_a1 <- ggplotGrob(FRE_a_LI)
FRE_a2 <- ggplotGrob(FRE_a_OC)
FRE_a3 <- ggplotGrob(FRE_a_LA)
FRE_a4 <- ggplotGrob(FRE_a_HY)
grid.arrange(
  FRE_a1, FRE_a2, FRE_a3, FRE_a4,
  nrow = 4,
  top = "Readability of Abstract"
)
# Readability title plot --------------------------------------------------
# One panel: yearly mean title readability (Flesch Reading Ease) with
# +/- 1 SE error bars, for a topic. Factored from four identical blocks.
plot_fre_title <- function(dat, panel_title) {
  ggplot(data = dat, aes(x = Publication.Year, y = FRE_mean_tit)) +
    geom_line(aes(x = Publication.Year, y = FRE_mean_tit),
              linetype = 1, col = "blue", alpha = 1) +
    geom_errorbar(aes(ymin = FRE_mean_tit - FRE_se_tit,
                      ymax = FRE_mean_tit + FRE_se_tit),
                  width = .8, col = "blue") +
    geom_point(size = 2, col = "blue") +
    labs(title = panel_title,
         x = NULL,
         y = "Readability [mean +/- S.E.]") +
    scale_x_continuous(breaks = seq(from = 1960, to = 2020, by = 10),
                       labels = c("1960", "1970", "1980", "1990",
                                  "2000", "2010", "2020")) +
    theme_bw() +
    theme(
      legend.position = "none",
      axis.title = element_text(size = 10),
      axis.text.x = element_text(size = 10),
      axis.text.y = element_text(size = 10),
      panel.grid = element_blank(),
      plot.caption = element_text(size = 10, color = "gray50"),
      plot.title = element_text(face = "bold", size = 12)
    )
}
# Outer parentheses print each plot as it is assigned (original behaviour).
(FRE_t_LI <- plot_fre_title(db_LI_year, "Limnology"))
(FRE_t_OC <- plot_fre_title(db_OC_year, "Oceanography"))
(FRE_t_LA <- plot_fre_title(db_LA_year, "Lake Ecology"))
(FRE_t_HY <- plot_fre_title(db_HY_year, "Hydrobiology"))
# four plots together
FRE_t1 <- ggplotGrob(FRE_t_LI)
FRE_t2 <- ggplotGrob(FRE_t_OC)
FRE_t3 <- ggplotGrob(FRE_t_LA)
FRE_t4 <- ggplotGrob(FRE_t_HY)
grid.arrange(
  FRE_t1, FRE_t2, FRE_t3, FRE_t4,
  nrow = 4,
  top = "Readability of Title"
)
# Number of authors --------------------------------------------------------
# One panel: yearly mean number of authors with +/- 1 SE error bars, for a
# topic. Factored from four identical blocks; y axis fixed to 1..7 so the
# four panels share a scale.
plot_n_authors <- function(dat, panel_title) {
  ggplot(data = dat, aes(x = Publication.Year, y = N_aut_mean)) +
    geom_line(aes(x = Publication.Year, y = N_aut_mean),
              linetype = 1, col = "blue", alpha = 1) +
    geom_errorbar(aes(ymin = N_aut_mean - N_aut_se,
                      ymax = N_aut_mean + N_aut_se),
                  width = .8, col = "blue") +
    geom_point(size = 2, col = "blue") +
    labs(title = panel_title,
         x = NULL,
         y = "Number of authors [mean +/- S.E.]") +
    scale_x_continuous(breaks = seq(from = 1960, to = 2020, by = 10),
                       labels = c("1960", "1970", "1980", "1990",
                                  "2000", "2010", "2020")) +
    ylim(1, 7) +
    theme_bw() +
    theme(
      legend.position = "none",
      axis.title = element_text(size = 10),
      axis.text.x = element_text(size = 10),
      axis.text.y = element_text(size = 10),
      panel.grid = element_blank(),
      plot.caption = element_text(size = 10, color = "gray50"),
      plot.title = element_text(face = "bold", size = 12)
    )
}
# Outer parentheses print each plot as it is assigned (original behaviour).
(Nau_LI <- plot_n_authors(db_LI_year, "Limnology"))
(Nau_OC <- plot_n_authors(db_OC_year, "Oceanography"))
(Nau_LA <- plot_n_authors(db_LA_year, "Lake Ecology"))
(Nau_HY <- plot_n_authors(db_HY_year, "Hydrobiology"))
# four plots together
Nau_1 <- ggplotGrob(Nau_LI)
Nau_2 <- ggplotGrob(Nau_OC)
Nau_3 <- ggplotGrob(Nau_LA)
Nau_4 <- ggplotGrob(Nau_HY)
grid.arrange(
  Nau_1, Nau_2, Nau_3, Nau_4,
  nrow = 4,
  top = "Number of authors"
)
# Number of pages ---------------------------------------------------------
# One panel: yearly mean number of pages with +/- 1 SE error bars, for a
# topic. Factored from four identical blocks.
# FIX: the original y-axis label said "Number of authors" (copy-paste from
# the previous section) while plotting N_pages_mean; corrected here.
# NOTE(review): these panels reuse the Npa_* names later reassigned by the
# "Number of papers" section -- fine when run top-to-bottom, but fragile.
plot_n_pages <- function(dat, panel_title) {
  ggplot(data = dat, aes(x = Publication.Year, y = N_pages_mean)) +
    geom_line(aes(x = Publication.Year, y = N_pages_mean),
              linetype = 1, col = "blue", alpha = 1) +
    geom_errorbar(aes(ymin = N_pages_mean - N_pages_se,
                      ymax = N_pages_mean + N_pages_se),
                  width = .8, col = "blue") +
    geom_point(size = 2, col = "blue") +
    labs(title = panel_title,
         x = NULL,
         y = "Number of pages [mean +/- S.E.]") +
    scale_x_continuous(breaks = seq(from = 1960, to = 2020, by = 10),
                       labels = c("1960", "1970", "1980", "1990",
                                  "2000", "2010", "2020")) +
    ylim(1, 30) +
    theme_bw() +
    theme(
      legend.position = "none",
      axis.title = element_text(size = 10),
      axis.text.x = element_text(size = 10),
      axis.text.y = element_text(size = 10),
      panel.grid = element_blank(),
      plot.caption = element_text(size = 10, color = "gray50"),
      plot.title = element_text(face = "bold", size = 12)
    )
}
# Outer parentheses print each plot as it is assigned (original behaviour).
(Npa_LI <- plot_n_pages(db_LI_year, "Limnology"))
(Npa_OC <- plot_n_pages(db_OC_year, "Oceanography"))
(Npa_LA <- plot_n_pages(db_LA_year, "Lake Ecology"))
(Npa_HY <- plot_n_pages(db_HY_year, "Hydrobiology"))
# four plots together
Npa_1 <- ggplotGrob(Npa_LI)
Npa_2 <- ggplotGrob(Npa_OC)
Npa_3 <- ggplotGrob(Npa_LA)
Npa_4 <- ggplotGrob(Npa_HY)
grid.arrange(
  Npa_1, Npa_2, Npa_3, Npa_4,
  nrow = 4,
  top = "Number of pages"
)
# Number of cited references ----------------------------------------------
# One panel: yearly mean number of cited references with +/- 1 SE error
# bars, for a topic. Factored from four identical blocks.
plot_n_citedref <- function(dat, panel_title) {
  ggplot(data = dat, aes(x = Publication.Year, y = N_citedref_mean)) +
    geom_line(aes(x = Publication.Year, y = N_citedref_mean),
              linetype = 1, col = "blue", alpha = 1) +
    geom_errorbar(aes(ymin = N_citedref_mean - N_citedref_se,
                      ymax = N_citedref_mean + N_citedref_se),
                  width = .8, col = "blue") +
    geom_point(size = 2, col = "blue") +
    labs(title = panel_title,
         x = NULL,
         y = "Number of cited references [mean +/- S.E.]") +
    scale_x_continuous(breaks = seq(from = 1960, to = 2020, by = 10),
                       labels = c("1960", "1970", "1980", "1990",
                                  "2000", "2010", "2020")) +
    ylim(1, 80) +
    theme_bw() +
    theme(
      legend.position = "none",
      axis.title = element_text(size = 10),
      axis.text.x = element_text(size = 10),
      axis.text.y = element_text(size = 10),
      panel.grid = element_blank(),
      plot.caption = element_text(size = 10, color = "gray50"),
      plot.title = element_text(face = "bold", size = 12)
    )
}
# Outer parentheses print each plot as it is assigned (original behaviour).
(Ncr_LI <- plot_n_citedref(db_LI_year, "Limnology"))
(Ncr_OC <- plot_n_citedref(db_OC_year, "Oceanography"))
(Ncr_LA <- plot_n_citedref(db_LA_year, "Lake Ecology"))
(Ncr_HY <- plot_n_citedref(db_HY_year, "Hydrobiology"))
# four plots together
Ncr_1 <- ggplotGrob(Ncr_LI)
Ncr_2 <- ggplotGrob(Ncr_OC)
Ncr_3 <- ggplotGrob(Ncr_LA)
Ncr_4 <- ggplotGrob(Ncr_HY)
grid.arrange(
  Ncr_1, Ncr_2, Ncr_3, Ncr_4,
  nrow = 4,
  top = "Number of cited references"
)
# Number of citations -----------------------------------------------------
# One panel: yearly mean number of citations received (WoS Core) with
# +/- 1 SE error bars, for a topic. Factored from four identical blocks.
plot_n_citations <- function(dat, panel_title) {
  ggplot(data = dat, aes(x = Publication.Year, y = N_citations_mean)) +
    geom_line(aes(x = Publication.Year, y = N_citations_mean),
              linetype = 1, col = "blue", alpha = 1) +
    geom_errorbar(aes(ymin = N_citations_mean - N_citations_se,
                      ymax = N_citations_mean + N_citations_se),
                  width = .8, col = "blue") +
    geom_point(size = 2, col = "blue") +
    labs(title = panel_title,
         x = NULL,
         y = "Number of citations [mean +/- S.E.]") +
    scale_x_continuous(breaks = seq(from = 1960, to = 2020, by = 10),
                       labels = c("1960", "1970", "1980", "1990",
                                  "2000", "2010", "2020")) +
    ylim(1, 80) +
    theme_bw() +
    theme(
      legend.position = "none",
      axis.title = element_text(size = 10),
      axis.text.x = element_text(size = 10),
      axis.text.y = element_text(size = 10),
      panel.grid = element_blank(),
      plot.caption = element_text(size = 10, color = "gray50"),
      plot.title = element_text(face = "bold", size = 12)
    )
}
# Outer parentheses print each plot as it is assigned (original behaviour).
(Nci_LI <- plot_n_citations(db_LI_year, "Limnology"))
(Nci_OC <- plot_n_citations(db_OC_year, "Oceanography"))
(Nci_LA <- plot_n_citations(db_LA_year, "Lake Ecology"))
(Nci_HY <- plot_n_citations(db_HY_year, "Hydrobiology"))
# four plots together
Nci_1 <- ggplotGrob(Nci_LI)
Nci_2 <- ggplotGrob(Nci_OC)
Nci_3 <- ggplotGrob(Nci_LA)
Nci_4 <- ggplotGrob(Nci_HY)
grid.arrange(
  Nci_1, Nci_2, Nci_3, Nci_4,
  nrow = 4,
  top = "Number of citations"
)
# Number of papers --------------------------------------------------------
# One panel: yearly number of papers for a topic (no error bars -- these
# are counts, not means). Factored from the copy-pasted panel blocks.
# NOTE(review): this section reassigns the Npa_* names used above for the
# page-count panels -- fine when run top-to-bottom, but fragile.
plot_yearly_papers <- function(dat, panel_title) {
  ggplot(data = dat, aes(x = Publication.Year, y = N_papers)) +
    geom_line(aes(x = Publication.Year, y = N_papers),
              linetype = 1, col = "blue", alpha = 1) +
    labs(title = panel_title,
         x = NULL,
         y = "Number of papers") +
    scale_x_continuous(breaks = seq(from = 1960, to = 2020, by = 10),
                       labels = c("1960", "1970", "1980", "1990",
                                  "2000", "2010", "2020")) +
    ylim(1, 2000) +
    theme_bw() +
    theme(
      legend.position = "none",
      axis.title = element_text(size = 10),
      axis.text.x = element_text(size = 10),
      axis.text.y = element_text(size = 10),
      panel.grid = element_blank(),
      plot.caption = element_text(size = 10, color = "gray50"),
      plot.title = element_text(face = "bold", size = 12)
    )
}
# Outer parentheses print each plot as it is assigned (original behaviour).
(Npa_LI <- plot_yearly_papers(db_LI_year, "Limnology"))
(Npa_OC <- plot_yearly_papers(db_OC_year, "Oceanography"))
(Npa_LA <- ggplot(data=db_LA_year, aes(x=Publication.Year, y=N_papers)) +
geom_line(aes(x=Publication.Year, y= N_papers),linetype = 1,col="blue",alpha=1) +
#geom_errorbar(aes(ymin=FRE_mean_abs-FRE_se_abs, ymax=FRE_mean_abs+FRE_se_abs), width=.8,col="blue") +
#geom_point(size = 2,col="blue")+
labs(title= "Lake Ecology",
x = NULL,
y = "Number of papers") +
scale_x_continuous(breaks = c(seq(from=1960,to=2020,by = 10)),
labels = c("1960","1970","1980","1990","2000","2010","2020"))+
ylim(1,2000) +
theme_bw()+
theme(
legend.position = "none",
axis.title = element_text(size = 10),
axis.text.x = element_text(size = 10),
axis.text.y = element_text(size = 10),
panel.grid = element_blank(),
plot.caption = element_text(size = 10, color = "gray50"),
plot.title = element_text(face="bold", size=12)
)
)
(Npa_HY <- ggplot(data=db_HY_year, aes(x=Publication.Year, y=N_papers)) +
geom_line(aes(x=Publication.Year, y= N_papers),linetype = 1,col="blue",alpha=1) +
#geom_errorbar(aes(ymin=FRE_mean_abs-FRE_se_abs, ymax=FRE_mean_abs+FRE_se_abs), width=.8,col="blue") +
#geom_point(size = 2,col="blue")+
labs(title= "Hydrobiology",
x = NULL,
y = "Number of papers") +
scale_x_continuous(breaks = c(seq(from=1960,to=2020,by = 10)),
labels = c("1960","1970","1980","1990","2000","2010","2020"))+
ylim(1,2000) +
theme_bw()+
theme(
legend.position = "none",
axis.title = element_text(size = 10),
axis.text.x = element_text(size = 10),
axis.text.y = element_text(size = 10),
panel.grid = element_blank(),
plot.caption = element_text(size = 10, color = "gray50"),
plot.title = element_text(face="bold", size=12)
)
)
# four plots together
Npa_1 <- ggplotGrob(Npa_LI)
Npa_2 <- ggplotGrob(Npa_OC)
Npa_3 <- ggplotGrob(Npa_LA)
Npa_4 <- ggplotGrob(Npa_HY)
grid.arrange(
Npa_1, Npa_2, Npa_3, Npa_4,
nrow=4,
top="Number of papers"
)
# ANALYSES FOR NUMBER OF PAPERS
# prepare dataset: stack the four per-field yearly summaries and label each
# row with the field it came from (order must match the rbind() above).
merged <- rbind(db_LI_year, db_OC_year, db_LA_year, db_HY_year)
merged$topic <- as.factor(c(rep('limnology',nrow(db_LI_year)),
rep('oceanography',nrow(db_OC_year)),
rep('lake ecology',nrow(db_LA_year)),
rep('hydrobiology',nrow(db_HY_year))))
summary(merged)
names(merged)
# load libraries
library('car')
library('emmeans')
library('gam')
library('MASS')
library('performance')
# run gam model: Poisson GAM of paper counts, smooth year effect crossed with topic
m_pap <- gam::gam(N_papers ~ topic*s(Publication.Year),
family=poisson, data=merged)
performance::check_model(m_pap)
summary(m_pap)
# NOTE(review): s() inside an emmeans specification is unusual -- confirm
# that emmeans handles the smooth term as intended here.
pairs(emmeans::emmeans(m_pap, ~topic*s(Publication.Year)),
simple="topic")
# run nb.glm model: negative-binomial GLM as an overdispersion-robust check
m_pap2 <- MASS::glm.nb(N_papers ~ topic*Publication.Year, data=merged)
performance::check_model(m_pap2)
car::Anova(m_pap2)
pairs(emmeans::emmeans(m_pap2, ~topic*Publication.Year),
simple="topic")
# plot the results
ggplot2::ggplot(merged, aes(x=Publication.Year, y=N_papers)) +
theme_light() +
geom_point(aes(colour=topic)) +
geom_smooth(aes(colour=topic)) +
scale_y_continuous(trans='log2') +
scale_x_continuous(breaks = c(seq(from=1960, to=2020, by=10)),
labels=c("1960","1970","1980","1990","2000","2010","2020")) +
labs(x=NULL, y="Number of papers") +
scale_color_brewer(palette="Paired") |
integration.constant <- function( density, w.lo, w.hi, ... ){
  # Compute the scalar that normalizes an unscaled distance density: the
  # trapezoid-rule integral of `density` over [w.lo, w.hi], so that
  # density / (returned value) integrates to 1.0.
  #
  # Input:
  #   density = function to compute the integration constant for. It must
  #             accept arguments dist, scale, w.lo, w.hi (and anything in
  #             ...) and be evaluable across [w.lo, w.hi].
  #   w.lo, w.hi = lower and upper limits of the integral.
  #   ... = additional arguments passed on to density. These vary by
  #         density function, but generally are parameter values, series,
  #         expansion terms, etc.
  #
  # Output:
  #   a divisor scalar such that density / scalar integrates to 1.0, i.e.
  #   the integral of the unscaled density from w.lo to w.hi.
  density <- match.fun(density)
  # 200-point evenly spaced evaluation grid; use the full `length.out`
  # argument name (the original relied on partial matching of `length=`).
  seqx <- seq(w.lo, w.hi, length.out = 200)
  seqy <- density(dist = seqx, scale = FALSE, w.lo = w.lo, w.hi = w.hi, ...)
  # Trapezoid rule: grid spacing times the summed pairwise means of
  # adjacent ordinates.
  (seqx[2] - seqx[1]) * sum(seqy[-length(seqy)] + seqy[-1]) / 2
}
| /Rdistance/R/integration.constant.R | no_license | ingted/R-Examples | R | false | false | 895 | r | integration.constant <- function( density, w.lo, w.hi, ... ){
#
# Return the scalar so that integral from 0 to w of underlying density
# is 1.0
#
# Input:
# density = a function to compute integration constant for.
# this function must be capable of evaluating values from 0 to w
# w = upper limit of integral.
# ... = additional arguments to density. These vary by density function,
# but generally are parameter values, series, expansion terms, etc.
#
# Output:
# a divisor scalar such that density / scalar integrates to 1.0. i.e.,
# this output scalar is the integral of unscaled density from 0 to w.
#
density = match.fun(density)
seqx = seq(w.lo, w.hi, length=200)
seqy = density( dist=seqx, scale=FALSE, w.lo=w.lo, w.hi=w.hi,...)
# Trapazoid rule
scaler= (seqx[2]-seqx[1]) * sum(seqy[-length(seqy)]+seqy[-1]) / 2
scaler
}
|
# Data frames: adding and removing columns
dat <- data.frame(x=LETTERS[1:3], y=1:3)
dat
dat[,1]
dat$x
# New columns are created by plain assignment ...
dat$z <- dat$y^2
dat$name <- c("Cat", "Vic", "Osc")
# ... and removed by assigning NULL.
dat$y<-NULL
dat
# Merging data frames (differing key names handled via by.x / by.y)
dat1 <- data.frame(name=c("Cat", "Vic", "Osc"), age=c(9,7,4))
dat1
dat2 <- data.frame(names=c("Vic","Cat", "Osc"), gender=c("Male","Female","Male"))
dat2
dat <- merge(dat1, dat2, by.x="name", by.y="names")
dat
# Getting dimension and column info
df <- dat
df
names(df)
class(df$name)
class(df$age)
dim(df)
nrow(df)
ncol(df)
# Object structure
str(df)
head(airquality, 3)
tail(airquality, 3)
# The subset() function (the bracket-indexing line is the equivalent base form)
head(airquality, 3)
datA <- airquality[airquality$Temp>80,c("Ozone","Temp")]
datA <- subset(airquality, Temp > 80, select = c(Ozone, Temp))
datB <- subset(airquality, Day == 1, select = -Temp)
datC <- subset(airquality, select = Ozone:Wind)
# The summary() function
summary(airquality$Wind)
summary(airquality)
summary(airquality)
# Missing values: NA propagates through aggregates unless removed
colMeans(airquality)
is.na(NA)
s <- subset(airquality, !is.na(Ozone) )
colMeans(s)
mean(airquality$Ozone, na.rm=TRUE)
# Text manipulation
txt <- c("Hello, my",
"name is",
"anders."
)
grep("name", txt)
grepl("name", txt)
sub("anders", "Anders", txt)
df <- data.frame(
person.ID = 1:3,
fruit =
c("apple: 3 Orange : 9 banana:2",
" Orange:1 Apple: 3 banana: 10",
"banana: 3 Apple: 3 Orange : 04 "
))
df
# Regular expressions: capture the digits following "orange" (any case)
pattern <- ".*orange[ :]*([0-9]*).*"
sub(pattern, "\\1", df$fruit, ignore.case=TRUE)
# Build an ODBC connection string for the course's Azure SQL database.
# NOTE(review): credentials are hard-coded (shared course login); do not
# reuse this pattern with real secrets -- read them from the environment.
connStr <- paste(
"Server=msedxeus.database.windows.net",
"Database=DAT209x01",
"uid=RLogin",
"pwd=P@ssw0rd",
"Driver={SQL Server}",
sep=";"
)
# On non-Windows platforms use the FreeTDS driver instead.
if(.Platform$OS.type != "windows"){
connStr <- paste(
"Server=msedxeus.database.windows.net",
"Database=DAT209x01",
"uid=RLogin",
"pwd=P@ssw0rd",
"Driver=FreeTDS",
"TDS_Version=8.0",
"Port=1433",
sep=";"
)
}
library(RODBC)
# Date and Time object
as.Date("2016-03-10")
as.POSIXct("2016-03-10 13:53:38 CET")
conn <- odbcDriverConnect(connStr)
df <- sqlQuery(conn, "SELECT TOP 2000 * FROM bi.sentiment")
class(df$Date)
# Compare averaging/shifting on the class as read vs after coercion to Date.
mean(df$Date)
mean(df$Date+10)
mean(as.Date(df$Date))
mean(as.Date(df$Date)+10)
# Temporarily switch to an English locale so weekday/month names are stable,
# then restore the user's original LC_TIME setting.
old.locale<-Sys.getlocale(category = "LC_TIME")
Sys.setlocale("LC_TIME", "English")
table(weekdays(df$Date))
table(months(df$Date))
Sys.setlocale("LC_TIME",old.locale)
close(conn)
dat <- data.frame(x=LETTERS[1:3], y=1:3)
dat
dat[,1]
dat$x
dat$z <- dat$y^2
dat$name <- c("Cat", "Vic", "Osc")
dat$y<-NULL
dat
# Mergin data frames
dat1 <- data.frame(name=c("Cat", "Vic", "Osc"), age=c(9,7,4))
dat1
dat2 <- data.frame(names=c("Vic","Cat", "Osc"), gender=c("Male","Female","Male"))
dat2
dat <- merge(dat1, dat2, by.x="name", by.y="names")
dat
# Getting dimension and column info
df <- dat
df
names(df)
class(df$name)
class(df$age)
dim(df)
nrow(df)
ncol(df)
# Object structure
str(df)
head(airquality, 3)
tail(airquality, 3)
# The subset() function
head(airquality, 3)
datA <- airquality[airquality$Temp>80,c("Ozone","Temp")]
datA <- subset(airquality, Temp > 80, select = c(Ozone, Temp))
datB <- subset(airquality, Day == 1, select = -Temp)
datC <- subset(airquality, select = Ozone:Wind)
# The summary() function
summary(airquality$Wind)
summary(airquality)
summary(airquality)
# Missing values
colMeans(airquality)
is.na(NA)
s <- subset(airquality, !is.na(Ozone) )
colMeans(s)
mean(airquality$Ozone, na.rm=TRUE)
# Text manipulation
txt <- c("Hello, my",
"name is",
"anders."
)
grep("name", txt)
grepl("name", txt)
sub("anders", "Anders", txt)
df <- data.frame(
person.ID = 1:3,
fruit =
c("apple: 3 Orange : 9 banana:2",
" Orange:1 Apple: 3 banana: 10",
"banana: 3 Apple: 3 Orange : 04 "
))
df
# Regular expressions
pattern <- ".*orange[ :]*([0-9]*).*"
sub(pattern, "\\1", df$fruit, ignore.case=TRUE)
connStr <- paste(
"Server=msedxeus.database.windows.net",
"Database=DAT209x01",
"uid=RLogin",
"pwd=P@ssw0rd",
"Driver={SQL Server}",
sep=";"
)
if(.Platform$OS.type != "windows"){
connStr <- paste(
"Server=msedxeus.database.windows.net",
"Database=DAT209x01",
"uid=RLogin",
"pwd=P@ssw0rd",
"Driver=FreeTDS",
"TDS_Version=8.0",
"Port=1433",
sep=";"
)
}
library(RODBC)
# Date and Time object
as.Date("2016-03-10")
as.POSIXct("2016-03-10 13:53:38 CET")
conn <- odbcDriverConnect(connStr)
df <- sqlQuery(conn, "SELECT TOP 2000 * FROM bi.sentiment")
class(df$Date)
mean(df$Date)
mean(df$Date+10)
mean(as.Date(df$Date))
mean(as.Date(df$Date)+10)
old.locale<-Sys.getlocale(category = "LC_TIME")
Sys.setlocale("LC_TIME", "English")
table(weekdays(df$Date))
table(months(df$Date))
Sys.setlocale("LC_TIME",old.locale)
close(conn)
|
# Libraries for data handling, plotting and fonts.
library(data.table)
library(tidyverse)
library(scales)
library(extrafont)
# NOTE(review): absolute setwd() makes the script machine-specific; consider
# a project-relative path instead.
setwd("/Volumes/Macintosh HD/Users/au460892/Desktop/R-projects/DDR_project_Jan_2021/")
data = fread("Data/LOF_all.tsv")
all_samples = fread("Data/all_donors.tsv")
features = fread("Data/COMBINED_featureCounts.tsv")
# Sanitise feature column names ("-" and ">" are not formula-safe).
colnames(features) = str_replace_all(colnames(features), "-",".")
colnames(features) = str_replace_all(colnames(features), ">","_._")
# Keep only donors that have feature counts.
data = filter(data, Sample_ID %in% features$Sample_ID)
table(!duplicated(data$Sample_ID))
features$Signature.1 = NULL
#Increase in SV overall
# feat_enrich_summedFeat() is defined in the sourced vignette script; it
# apparently also leaves its working table in d2 in the global environment
# (see the `ATRX2 = d2` pattern below) -- confirm against that script.
source("Figures/Figure_4_new/Vignettes/GF/Feature_enrichment_SUMMED_FEAT.R")
# Per-sample clustered-SV feature sums for ATRX and for IDH1 in the CNS
# cohort; the result table d2 is captured right after each call.
ATRX =feat_enrich_summedFeat(gene = "ATRX",cohort = c("CNS"),
feat = grep("cluster", colnames(features), value = T)
)
ATRX2 = d2
IDH1 =feat_enrich_summedFeat(gene = "IDH1",cohort = c("CNS"),
feat = grep("cluster", colnames(features), value = T)
)
IDH12 = d2
# NOTE(review): the status column is named CDK12 but holds generic LOF/WT
# calls for whichever gene was queried (relabelled RB1-d/RB1-wt below);
# consider renaming it for clarity.
d2 = bind_rows(ATRX2, IDH12)%>%na.omit()%>%
rename(Feature = name)
#Annotate patients with mutation in both IDH1 and ATRX
ATRX = filter(d2, Gene == "ATRX", CDK12 == "LOF")
IDH1 = filter(d2, Gene == "IDH1", CDK12 == "LOF")
both_lof = filter(ATRX, Sample_ID %in% IDH1$Sample_ID)%>%mutate(Gene="ATRX+IDH1")
#WT samples for both genes
ATRX = filter(d2, Gene == "ATRX", CDK12 == "WT")
IDH1 = filter(d2, Gene == "IDH1", CDK12 == "WT")
both_wt = filter(ATRX, Sample_ID %in% IDH1$Sample_ID)%>%mutate(Gene="ATRX+IDH1")
#Remove ATRX and IDH1 mutated samples with mutations in both
single_lof = filter(d2, Sample_ID %in% both_lof$Sample_ID & Gene %in% c("ATRX", "IDH1"))
d2 = bind_rows(d2, both_lof, both_wt)
d2 = anti_join(d2, single_lof)
# Per-gene, per-tumour-location Wilcoxon test of LOF vs WT feature sums.
# The first sub-assignment creates the p column (NA elsewhere); try()
# skips strata where the test cannot be computed.
for(gene in unique(d2$Gene)){
for(ct in unique(d2$primaryTumorLocation)){
try({
x = filter(d2, Gene == gene & primaryTumorLocation == ct & CDK12 == "LOF")
y = filter(d2, Gene == gene & primaryTumorLocation == ct & CDK12 == "WT")
d2$p[d2$Gene == gene & d2$primaryTumorLocation == ct] = wilcox.test(x$value, y$value)$p.value
})
}
}
d2$Feature = "Summed #SVs per patient"
# Group sizes for the n= labels under each box.
d2 = d2%>%group_by(CDK12, Gene)%>%mutate(n = n())%>%ungroup()
d2 = filter(d2, CDK12 != "VUS")
d2 = mutate(d2, CDK12 = ifelse(CDK12 == "LOF", "RB1-d", "RB1-wt"))
#d2$gene = factor(d2$Gene, levels = c("IDH1+ATRX", "ATRX", "IDH1", "BRCA2"))
# Boxplots of SV counts per status (log scale; +0.1 keeps zeros plottable).
SV = ggplot(filter(d2, CDK12 != "VUS"), aes(x = CDK12, y = value+0.1))+
geom_boxplot(outlier.shape = NA, width= 0.6)+
geom_jitter(width = 0.1, size = 0.3, alpha = 0.5)+
ggpubr::stat_compare_means(comparisons = list(c("RB1-d","RB1-wt")), paired = F,
method = "wilcox.test", size = 2.5, family = "Arial")+
facet_wrap(~Gene, scales = "free_x", ncol = 3)+
scale_y_log10() +
xlab("")+
ylab("#SVs")+
theme_bw(base_size = 9, base_family = "Arial")+
theme(
panel.grid = element_blank()
)+
scale_fill_manual(values = c("goldenrod", "skyblue"))+
scale_color_manual(values = c("grey", "brown"))+
guides(fill = guide_legend(title = "Status"))+
geom_text(data = distinct(d2, n, .keep_all = T), size = 2.5, family = "Arial",
aes(x = CDK12, y = 0.05, label = paste("n=",n, sep= "")))
SV
#Increase in MMR signatures summed ATRX and IDH1
#source("Figures/Figure_4_new/Vignettes/GF/Feature_enrichment_SUMMED_FEAT.R")
# Same pipeline as the SV block above, but summing the two MMR-related
# mutational signatures instead of clustered-SV features.
ATRX =feat_enrich_summedFeat(gene = c("ATRX"),cohort = c("CNS"),
feat = c("Signature.MMR2", "Signature.26")
)
ATRX2 = d2
IDH1 =feat_enrich_summedFeat(gene = c("IDH1"), cohort = c("CNS"),
feat = c("Signature.MMR2", "Signature.26")
)
IDH12 = d2
d2 = bind_rows(ATRX2, IDH12)%>%na.omit()%>%
rename(Feature = name)
#Annotate patients with mutation in both IDH1 and ATRX
ATRX = filter(d2, Gene == "ATRX", CDK12 == "LOF")
IDH1 = filter(d2, Gene == "IDH1", CDK12 == "LOF")
both_lof = filter(ATRX, Sample_ID %in% IDH1$Sample_ID)%>%mutate(Gene="ATRX+IDH1")
#WT samples for both genes
ATRX = filter(d2, Gene == "ATRX", CDK12 == "WT")
IDH1 = filter(d2, Gene == "IDH1", CDK12 == "WT")
both_wt = filter(ATRX, Sample_ID %in% IDH1$Sample_ID)%>%mutate(Gene="ATRX+IDH1")
#Remove ATRX and IDH1 mutated samples with mutations in both
single_lof = filter(d2, Sample_ID %in% both_lof$Sample_ID & Gene %in% c("ATRX", "IDH1"))
d2 = bind_rows(d2, both_lof, both_wt)
d2 = anti_join(d2, single_lof)
# Per-gene, per-tumour-location Wilcoxon test of LOF vs WT signature sums.
for(gene in unique(d2$Gene)){
for(ct in unique(d2$primaryTumorLocation)){
try({
x = filter(d2, Gene == gene & primaryTumorLocation == ct & CDK12 == "LOF")
y = filter(d2, Gene == gene & primaryTumorLocation == ct & CDK12 == "WT")
d2$p[d2$Gene == gene & d2$primaryTumorLocation == ct] = wilcox.test(x$value, y$value)$p.value
})
}
}
d2 = filter(d2, CDK12 != "VUS")
d2 = d2%>%group_by(CDK12, Gene)%>%mutate(n = n())%>%ungroup()
d2 = mutate(d2, CDK12 = ifelse(CDK12 == "LOF", "RB1-d", "RB1-wt"))
# NOTE(review): the filter on CDK12 != "VUS" below is redundant (already
# filtered above), and the "#SVs" y label looks copy-pasted from the SV
# plot -- it should presumably describe signature exposure; confirm.
MMR = ggplot(filter(d2, CDK12 != "VUS"), aes(x = CDK12, y = value+0.1))+
geom_boxplot(outlier.shape = NA, width= 0.6)+
geom_jitter(width = 0.1, size = 0.3, alpha = 0.5)+
ggpubr::stat_compare_means(comparisons = list(c("RB1-d","RB1-wt")), paired = F,
method = "wilcox.test", size = 2.5, family = "Arial")+
facet_wrap(~Gene, scales = "free_x", ncol = 3)+
scale_y_log10() +
xlab("")+
ylab("#SVs")+
theme_bw(base_size = 9, base_family = "Arial")+
theme(
panel.grid = element_blank()
)+
scale_fill_manual(values = c("goldenrod", "skyblue"))+
scale_color_manual(values = c("grey", "brown"))+
guides(fill = guide_legend(title = "Status"))+
geom_text(data = distinct(d2, n, .keep_all = T), size = 2.5, family = "Arial",
aes(x = CDK12, y = 0.05, label = paste("n=",n, sep= "")))
MMR
# patchwork loaded here for figure assembly; not used in the code above.
library(patchwork)
| /Code/Old/Figures/Fig2-4/Old/Vignettes/ATRX/ATRX_new_figure.R | no_license | SimonGrund/DDR_Predict | R | false | false | 5,684 | r | library(data.table)
library(tidyverse)
library(scales)
library(extrafont)
setwd("/Volumes/Macintosh HD/Users/au460892/Desktop/R-projects/DDR_project_Jan_2021/")
data = fread("Data/LOF_all.tsv")
all_samples = fread("Data/all_donors.tsv")
features = fread("Data/COMBINED_featureCounts.tsv")
colnames(features) = str_replace_all(colnames(features), "-",".")
colnames(features) = str_replace_all(colnames(features), ">","_._")
data = filter(data, Sample_ID %in% features$Sample_ID)
table(!duplicated(data$Sample_ID))
features$Signature.1 = NULL
#Increase in SV overall
source("Figures/Figure_4_new/Vignettes/GF/Feature_enrichment_SUMMED_FEAT.R")
ATRX =feat_enrich_summedFeat(gene = "ATRX",cohort = c("CNS"),
feat = grep("cluster", colnames(features), value = T)
)
ATRX2 = d2
IDH1 =feat_enrich_summedFeat(gene = "IDH1",cohort = c("CNS"),
feat = grep("cluster", colnames(features), value = T)
)
IDH12 = d2
d2 = bind_rows(ATRX2, IDH12)%>%na.omit()%>%
rename(Feature = name)
#Annotate patients with mutation in both IDH1 and ATRX
ATRX = filter(d2, Gene == "ATRX", CDK12 == "LOF")
IDH1 = filter(d2, Gene == "IDH1", CDK12 == "LOF")
both_lof = filter(ATRX, Sample_ID %in% IDH1$Sample_ID)%>%mutate(Gene="ATRX+IDH1")
#WT
ATRX = filter(d2, Gene == "ATRX", CDK12 == "WT")
IDH1 = filter(d2, Gene == "IDH1", CDK12 == "WT")
both_wt = filter(ATRX, Sample_ID %in% IDH1$Sample_ID)%>%mutate(Gene="ATRX+IDH1")
#Remove ATRX and IDH1 mutated samples with mutations in both
single_lof = filter(d2, Sample_ID %in% both_lof$Sample_ID & Gene %in% c("ATRX", "IDH1"))
d2 = bind_rows(d2, both_lof, both_wt)
d2 = anti_join(d2, single_lof)
for(gene in unique(d2$Gene)){
for(ct in unique(d2$primaryTumorLocation)){
try({
x = filter(d2, Gene == gene & primaryTumorLocation == ct & CDK12 == "LOF")
y = filter(d2, Gene == gene & primaryTumorLocation == ct & CDK12 == "WT")
d2$p[d2$Gene == gene & d2$primaryTumorLocation == ct] = wilcox.test(x$value, y$value)$p.value
})
}
}
d2$Feature = "Summed #SVs per patient"
d2 = d2%>%group_by(CDK12, Gene)%>%mutate(n = n())%>%ungroup()
d2 = filter(d2, CDK12 != "VUS")
d2 = mutate(d2, CDK12 = ifelse(CDK12 == "LOF", "RB1-d", "RB1-wt"))
#d2$gene = factor(d2$Gene, levels = c("IDH1+ATRX", "ATRX", "IDH1", "BRCA2"))
SV = ggplot(filter(d2, CDK12 != "VUS"), aes(x = CDK12, y = value+0.1))+
geom_boxplot(outlier.shape = NA, width= 0.6)+
geom_jitter(width = 0.1, size = 0.3, alpha = 0.5)+
ggpubr::stat_compare_means(comparisons = list(c("RB1-d","RB1-wt")), paired = F,
method = "wilcox.test", size = 2.5, family = "Arial")+
facet_wrap(~Gene, scales = "free_x", ncol = 3)+
scale_y_log10() +
xlab("")+
ylab("#SVs")+
theme_bw(base_size = 9, base_family = "Arial")+
theme(
panel.grid = element_blank()
)+
scale_fill_manual(values = c("goldenrod", "skyblue"))+
scale_color_manual(values = c("grey", "brown"))+
guides(fill = guide_legend(title = "Status"))+
geom_text(data = distinct(d2, n, .keep_all = T), size = 2.5, family = "Arial",
aes(x = CDK12, y = 0.05, label = paste("n=",n, sep= "")))
SV
#Increase in MMR signatures summed ATRX and IDH1
#source("Figures/Figure_4_new/Vignettes/GF/Feature_enrichment_SUMMED_FEAT.R")
ATRX =feat_enrich_summedFeat(gene = c("ATRX"),cohort = c("CNS"),
feat = c("Signature.MMR2", "Signature.26")
)
ATRX2 = d2
IDH1 =feat_enrich_summedFeat(gene = c("IDH1"), cohort = c("CNS"),
feat = c("Signature.MMR2", "Signature.26")
)
IDH12 = d2
d2 = bind_rows(ATRX2, IDH12)%>%na.omit()%>%
rename(Feature = name)
#Annotate patients with mutation in both IDH1 and ATRX
ATRX = filter(d2, Gene == "ATRX", CDK12 == "LOF")
IDH1 = filter(d2, Gene == "IDH1", CDK12 == "LOF")
both_lof = filter(ATRX, Sample_ID %in% IDH1$Sample_ID)%>%mutate(Gene="ATRX+IDH1")
#WT
ATRX = filter(d2, Gene == "ATRX", CDK12 == "WT")
IDH1 = filter(d2, Gene == "IDH1", CDK12 == "WT")
both_wt = filter(ATRX, Sample_ID %in% IDH1$Sample_ID)%>%mutate(Gene="ATRX+IDH1")
#Remove ATRX and IDH1 mutated samples with mutations in both
single_lof = filter(d2, Sample_ID %in% both_lof$Sample_ID & Gene %in% c("ATRX", "IDH1"))
d2 = bind_rows(d2, both_lof, both_wt)
d2 = anti_join(d2, single_lof)
for(gene in unique(d2$Gene)){
for(ct in unique(d2$primaryTumorLocation)){
try({
x = filter(d2, Gene == gene & primaryTumorLocation == ct & CDK12 == "LOF")
y = filter(d2, Gene == gene & primaryTumorLocation == ct & CDK12 == "WT")
d2$p[d2$Gene == gene & d2$primaryTumorLocation == ct] = wilcox.test(x$value, y$value)$p.value
})
}
}
d2 = filter(d2, CDK12 != "VUS")
d2 = d2%>%group_by(CDK12, Gene)%>%mutate(n = n())%>%ungroup()
d2 = mutate(d2, CDK12 = ifelse(CDK12 == "LOF", "RB1-d", "RB1-wt"))
MMR = ggplot(filter(d2, CDK12 != "VUS"), aes(x = CDK12, y = value+0.1))+
geom_boxplot(outlier.shape = NA, width= 0.6)+
geom_jitter(width = 0.1, size = 0.3, alpha = 0.5)+
ggpubr::stat_compare_means(comparisons = list(c("RB1-d","RB1-wt")), paired = F,
method = "wilcox.test", size = 2.5, family = "Arial")+
facet_wrap(~Gene, scales = "free_x", ncol = 3)+
scale_y_log10() +
xlab("")+
ylab("#SVs")+
theme_bw(base_size = 9, base_family = "Arial")+
theme(
panel.grid = element_blank()
)+
scale_fill_manual(values = c("goldenrod", "skyblue"))+
scale_color_manual(values = c("grey", "brown"))+
guides(fill = guide_legend(title = "Status"))+
geom_text(data = distinct(d2, n, .keep_all = T), size = 2.5, family = "Arial",
aes(x = CDK12, y = 0.05, label = paste("n=",n, sep= "")))
MMR
library(patchwork)
|
selectmap <- function(var1, var2, obs, Xpoly, Ypoly, method = "") {
# Toggle the selection state of observations picked on a scatter map.
#
# var1, var2   - x and y coordinates of the plotted observations.
# obs          - logical vector (or matrix with one row per observation)
#                of current selection flags; matched entries are negated.
# Xpoly, Ypoly - click coordinates: a single point for method = "point",
#                or the vertices of a closed polygon for method = "poly"
#                (the "poly" branch requires the sf package).
# method       - "point" or "poly"; any other value falls through and the
#                function returns NULL invisibly.
#
# Returns the updated obs.
####################################################
# selection of a point
####################################################
if (method == "point") {
# Scale-balanced L1 distance: each axis offset is weighted by the range
# of the other axis so neither coordinate dominates the match.
diff <- abs(var1 - as.numeric(Xpoly)) * (max(var2) - min(var2)) + abs(var2 - as.numeric(Ypoly)) * (max(var1) - min(var1))
# Only toggle when the nearest observation lies within ~1% of the plot
# extent, so clicks on empty space are ignored.
if (min(diff[diff == min(diff)]/(max(var2) - min(var2))/(max(var1) - min(var1))) < 0.01) {
if (length(obs) == length(var1))
obs[diff == min(diff)] <- !obs[diff == min(diff)]
else
obs[diff == min(diff), ] <- !obs[diff == min(diff), ]
}
return(obs)
}
####################################################
# Selection of a polygon
####################################################
if (method == "poly") {
# Build an sf polygon from the vertices and flip the flags of every
# observation falling inside it.
polyg <- cbind(unlist(Xpoly), unlist(Ypoly))
pol <- st_sfc(st_polygon(list(polyg)))
my_points <- st_as_sf(data.frame(x = var1, y = var2), coords = c("x", "y"))
def <- as.vector(st_intersects(my_points, pol, sparse = FALSE))
if (length(obs) == length(var1))
obs[def] <- !obs[def]
else
obs[def, ] <- !obs[def, ]
return(obs)
}
}
| /R/selectmap.R | no_license | tibo31/GeoXp | R | false | false | 1,181 | r | selectmap <- function(var1, var2, obs, Xpoly, Ypoly, method = "") {
####################################################
# selection of a point
####################################################
if (method == "point") {
diff <- abs(var1 - as.numeric(Xpoly)) * (max(var2) - min(var2)) + abs(var2 - as.numeric(Ypoly)) * (max(var1) - min(var1))
if (min(diff[diff == min(diff)]/(max(var2) - min(var2))/(max(var1) - min(var1))) < 0.01) {
if (length(obs) == length(var1))
obs[diff == min(diff)] <- !obs[diff == min(diff)]
else
obs[diff == min(diff), ] <- !obs[diff == min(diff), ]
}
return(obs)
}
####################################################
# Selection d'un polygone
####################################################
if (method == "poly") {
polyg <- cbind(unlist(Xpoly), unlist(Ypoly))
pol <- st_sfc(st_polygon(list(polyg)))
my_points <- st_as_sf(data.frame(x = var1, y = var2), coords = c("x", "y"))
def <- as.vector(st_intersects(my_points, pol, sparse = FALSE))
if (length(obs) == length(var1))
obs[def] <- !obs[def]
else
obs[def, ] <- !obs[def, ]
return(obs)
}
}
|
library(snow)
# Synthetic CPU workload for cluster benchmarking: 20 rounds of simulating
# 10,000 normal deviates, deriving a linear response from their mean and
# median, and fitting a linear model to it. The argument x is ignored (it
# only exists so the function can be used with clusterApply); the fitted
# models are discarded and the function always returns 1.
testFunction <- function(x) {
  for (rep_idx in seq_len(20)) {
    temp <- rnorm(10000)
    center <- mean(temp)
    midpoint <- median(temp)
    result <- center * temp + midpoint
    fit <- lm(result ~ temp)
  }
  1
}
# Time one parallel benchmark run.
#
# cores - number of worker processes to spawn.
#
# Creates a SOCK cluster (snow package), runs testFunction() on 12 tasks
# spread across the workers, tears the cluster down, and returns the
# elapsed wall-clock time (third element of system.time()) in seconds.
testparallel <- function(cores=1){
cl <- makeCluster(cores,type="SOCK")
elapsed <- system.time(clusterApply(cl, 1:12, testFunction))
stopCluster(cl)
return(elapsed[3])
}
# Candidate worker counts; note the first assignment is immediately
# overwritten, so only 1-6 cores are actually benchmarked.
cores <- c(1,2,3,4,6,8,12)
cores <- c(1,2,3,4,6)
time <- sapply(cores, testparallel)
# Two stacked panels: wall-clock runtime and total resource time (time x cores).
par(mfrow = c(2,1))
plot(cores, time, type = "b", main = "total runtime")
plot(cores, time*cores, type = "b", main = "resource time")
result <- data.frame(cores, time, time*cores)
result
| /CommentedCode/02-Samplers/Benchmarks/SnowBenchmark.R | no_license | ghislainv/LearningBayes | R | false | false | 662 | r | library(snow)
testFunction<-function(x) {
for (i in 1:20){
temp <- rnorm(10000)
a<- mean(temp)
b<-median(temp)
result = a * temp + b
fit <- lm(result~temp)
}
return(1)
}
testparallel <- function(cores=1){
cl <- makeCluster(cores,type="SOCK")
elapsed <- system.time(clusterApply(cl, 1:12, testFunction))
stopCluster(cl)
return(elapsed[3])
}
cores <- c(1,2,3,4,6,8,12)
cores <- c(1,2,3,4,6)
time <- sapply(cores, testparallel)
par(mfrow = c(2,1))
plot(cores, time, type = "b", main = "total runtime")
plot(cores, time*cores, type = "b", main = "resource time")
result <- data.frame(cores, time, time*cores)
result
|
# Downloading the data
if(!file.exists('data.zip')){
url<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(url,destfile = "data.zip")
}
unzip("data.zip")
# Reading the data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
str(NEI)
str(SCC)
#Question 1
#Have total emissions from PM2.5 decreased in the United States from 1999 to 2008? Using the base plotting system, make a plot showing the total PM2.5 emission from all sources for each of the years 1999, 2002, 2005, and 2008.
barplot(
((aggregate(Emissions ~ year, NEI, sum))$Emissions),
col = 'blue',
xlab="Year",
ylab="PM2.5 Emissions",
main="Total PM2.5 Emissions by Year"
)
# Saving the file as png
dev.copy(png,"Plot1.png", width=480, height=480)
dev.off()
# Question2
# Have total emissions from PM2.5 decreased in the Baltimore City, Maryland (\color{red}{\verb|fips == "24510"|}fips == "24510") from 1999 to 2008? Use the base plotting system to make a plot answering this question.
baltimore <- NEI[NEI$fips=="24510",]
barplot(
((aggregate(Emissions ~ year, baltimore, sum))$Emissions),
col = 'red',
xlab="Year",
ylab="PM2.5 Emissions",
main="Baltimore Total PM2.5 Emissions by Year"
)
# Saving the file as png
dev.copy(png,"Plot2.png", width=480, height=480)
dev.off()
# Question 3
# Of the four types of sources indicated by the type (point, nonpoint, onroad, nonroad) variable, which of these four sources have seen decreases in emissions from 1999-2008 for Baltimore City? Which have seen increases in emissions from 1999-2008? Use the ggplot2 plotting system to make a plot answer this question.
library(ggplot2)
ggplot(baltimore,aes(factor(year),Emissions,fill=type)) +
geom_bar(stat="identity") +
theme_bw() + guides(fill=FALSE)+
facet_grid(.~type,scales = "free",space="free") +
labs(x="Year", y=expression("Total PM2.5 Emissions")) +
labs(title=expression("Baltimore PM2.5 Emissions by year by Source Type"))
# Saving the file as png
dev.copy(png,"Plot3.png", width=480, height=480)
dev.off()
# Question 4
# Across the United States, how have emissions from coal combustion-related sources changed from 1999-2008?
SCC_Name <- subset(SCC, select = c("SCC", "Short.Name"))
NEI_Name <- merge(NEI, SCC_Name, by.x="SCC", by.y="SCC", all=TRUE)
coal <- subset(NEI_Name, grepl('Coal',NEI_Name$Short.Name, fixed=TRUE), c("Emissions", "year","type", "Short.Name"))
coal <- coal[complete.cases(coal), ]
ggplot(coal,aes(factor(year),Emissions)) +
geom_bar(stat="identity", fill='blue') +
theme_bw() + guides(fill=FALSE)+
labs(x="Year", y=expression("Total PM2.5 Emissions")) +
labs(title=expression("USA Coal PM2.5 Emissions by year by Source Type"))
# Saving the file as png
dev.copy(png,"Plot4.png", width=480, height=480)
dev.off()
# Question 5
# How have emissions from motor vehicle sources changed from 1999-2008 in Baltimore City?
balMotor <- subset(baltimore, NEI$type == "ON-ROAD")
balMotorAgg <- aggregate(Emissions ~ year, balMotor, sum)
ggplot(balMotorAgg,aes(factor(year),Emissions)) +
geom_bar(stat="identity", fill='red') +
theme_bw() + guides(fill=FALSE)+
labs(x="Year", y=expression("Total PM2.5 Emissions")) +
labs(title=expression("Baltimore Total PM2.5 Motor Emissions by Year"))
# Saving the file as png
dev.copy(png,"Plot5.png", width=480, height=480)
dev.off()
# Question 6
# Compare emissions from motor vehicle sources in Baltimore City with emissions from motor vehicle sources in Los Angeles County, California (fips == "06037"). Which city has seen greater changes over time in motor vehicle emissions?
library("plyr")
# On-road (motor vehicle) records for the two counties, summed per year and county.
balLAMotor <- subset(NEI, NEI$fips %in% c("24510","06037") & NEI$type == "ON-ROAD")
balLAMotorAgg <- aggregate(Emissions ~ year + fips, balLAMotor, sum)
balLAMotorAgg <- rename(balLAMotorAgg, c("fips"="City"))
# Replace the fips codes with readable city names.
balLAMotorAgg$City <- factor(balLAMotorAgg$City, levels=c("06037", "24510"), labels=c("Los Angeles", "Baltimore"))
ggplot(balLAMotorAgg, aes(x=factor(year), y=Emissions, fill=City)) +
geom_bar(aes(fill=year),stat="identity") +
facet_grid(.~City, scales = "free",space="free") +
guides(fill=FALSE) + theme_bw() +
labs(x="Year", y=expression("Total PM2.5 Emissions")) +
labs(title=expression("Total PM2.5 Motor Emissions in Los Angeles and Baltimore"))
# Saving the file as png
dev.copy(png,"Plot6.png", width=480, height=480)
dev.off()
| /Exploratory Data Analysis/Assignment2/Assignment2.R | no_license | LeaLGgit/datasciencecoursera | R | false | false | 4,569 | r |
# Downloading the data
if(!file.exists('data.zip')){
url<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(url,destfile = "data.zip")
}
unzip("data.zip")
# Reading the data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
str(NEI)
str(SCC)
# Question 1
# Have total emissions from PM2.5 decreased in the United States from 1999 to
# 2008? Using the base plotting system, make a plot showing the total PM2.5
# emission from all sources for each of the years 1999, 2002, 2005, and 2008.
totalByYear <- aggregate(Emissions ~ year, NEI, sum)
barplot(
  totalByYear$Emissions,
  col = "blue",
  xlab = "Year",
  ylab = "PM2.5 Emissions",
  main = "Total PM2.5 Emissions by Year"
)
# Saving the file as png
dev.copy(png, "Plot1.png", width = 480, height = 480)
dev.off()
# Question 2
# Have total emissions from PM2.5 decreased in Baltimore City, Maryland
# (fips == "24510") from 1999 to 2008? Use the base plotting system.
# NOTE: `baltimore` is reused by questions 3 and 5 below.
baltimore <- NEI[NEI$fips == "24510", ]
baltimoreByYear <- aggregate(Emissions ~ year, baltimore, sum)
barplot(
  baltimoreByYear$Emissions,
  col = "red",
  xlab = "Year",
  ylab = "PM2.5 Emissions",
  main = "Baltimore Total PM2.5 Emissions by Year"
)
# Saving the file as png
dev.copy(png, "Plot2.png", width = 480, height = 480)
dev.off()
# Question 3
# Of the four source types (point, nonpoint, onroad, nonroad), which have seen
# decreases in emissions from 1999-2008 for Baltimore City? Which have seen
# increases? Use the ggplot2 plotting system.
library(ggplot2)
plot3 <- ggplot(baltimore, aes(factor(year), Emissions, fill = type)) +
  geom_bar(stat = "identity") +
  theme_bw() +
  guides(fill = FALSE) +
  facet_grid(. ~ type, scales = "free", space = "free") +
  labs(x = "Year", y = expression("Total PM2.5 Emissions")) +
  labs(title = expression("Baltimore PM2.5 Emissions by year by Source Type"))
print(plot3)
# Saving the file as png
dev.copy(png, "Plot3.png", width = 480, height = 480)
dev.off()
# Question 4
# Across the United States, how have emissions from coal combustion-related
# sources changed from 1999-2008?
# Attach the human-readable source name to every NEI record, then keep only
# the coal-related sources with no missing values.
SCC_Name <- subset(SCC, select = c("SCC", "Short.Name"))
NEI_Name <- merge(NEI, SCC_Name, by.x = "SCC", by.y = "SCC", all = TRUE)
isCoal <- grepl("Coal", NEI_Name$Short.Name, fixed = TRUE)
coal <- NEI_Name[isCoal, c("Emissions", "year", "type", "Short.Name")]
coal <- coal[complete.cases(coal), ]
coalPlot <- ggplot(coal, aes(factor(year), Emissions)) +
  geom_bar(stat = "identity", fill = "blue") +
  theme_bw() +
  guides(fill = FALSE) +
  labs(x = "Year", y = expression("Total PM2.5 Emissions")) +
  labs(title = expression("USA Coal PM2.5 Emissions by year by Source Type"))
print(coalPlot)
# Saving the file as png
dev.copy(png, "Plot4.png", width = 480, height = 480)
dev.off()
# Question 5
# How have emissions from motor vehicle sources changed from 1999-2008 in Baltimore City?
# BUG FIX: subset() evaluates its condition within `baltimore`, so the filter
# must use the `type` column of `baltimore` itself. The original used
# `NEI$type`, a logical vector of length nrow(NEI) (millions of rows) applied
# to the much smaller `baltimore` frame, which selects the wrong rows.
balMotor <- subset(baltimore, type == "ON-ROAD")
# Total on-road emissions per year
balMotorAgg <- aggregate(Emissions ~ year, balMotor, sum)
ggplot(balMotorAgg, aes(factor(year), Emissions)) +
  geom_bar(stat = "identity", fill = "red") +
  theme_bw() + guides(fill = FALSE) +
  labs(x = "Year", y = expression("Total PM2.5 Emissions")) +
  labs(title = expression("Baltimore Total PM2.5 Motor Emissions by Year"))
# Saving the file as png
dev.copy(png, "Plot5.png", width = 480, height = 480)
dev.off()
# Question 6
# Compare emissions from motor vehicle sources in Baltimore City with emissions
# from motor vehicle sources in Los Angeles County, California (fips == "06037").
# Which city has seen greater changes over time in motor vehicle emissions?
library("plyr")
# subset() evaluates its condition within NEI, so columns are referenced directly
balLAMotor <- subset(NEI, fips %in% c("24510", "06037") & type == "ON-ROAD")
# Total on-road emissions per year and county
balLAMotorAgg <- aggregate(Emissions ~ year + fips, balLAMotor, sum)
balLAMotorAgg <- rename(balLAMotorAgg, c("fips" = "City"))
# Map fips codes to readable city names
balLAMotorAgg$City <- factor(balLAMotorAgg$City, levels = c("06037", "24510"),
                             labels = c("Los Angeles", "Baltimore"))
# BUG FIX: the original added geom_bar(aes(fill = year), ...), whose layer-level
# aes overrode the intended fill = City mapping with a continuous year scale;
# fill is now mapped once, in the top-level aes().
ggplot(balLAMotorAgg, aes(x = factor(year), y = Emissions, fill = City)) +
  geom_bar(stat = "identity") +
  facet_grid(. ~ City, scales = "free", space = "free") +
  guides(fill = FALSE) + theme_bw() +
  labs(x = "Year", y = expression("Total PM2.5 Emissions")) +
  labs(title = expression("Total PM2.5 Motor Emissions in Los Angeles and Baltimore"))
# Saving the file as png
dev.copy(png, "Plot6.png", width = 480, height = 480)
dev.off()
|
# Lecture 7 - Introduction to Machine Learning
# ============================================
# 1. Introduction
# ---------------
# In the previous lectures we learned about basic mechanics of R. Knowledge in
# itself is almost never a goal, but the application of knowledge. In this
# lecture we will provide overview of machine learning concept and in subsequent
# lectures we will show how R can be used to build a model predicting who will
# survive Titanic disaster, and maybe some other stuff too.
# 2. What's a Prediction?
# -----------------------
# To kick things off, first, we have to ask a very basic question - what's a
# prediction? A prediction is a guess which is basing its rationale on patterns
# discovered in historical data. In layman terms - we have historical data, and
# we identify some patterns. Then we get some new data that we have never seen
# before and we expect that the same pattern will exist there too. We apply our
# previously identified pattern to new data, and derive predictions on that
# basis.
# For instance we discovered in our historical records that people over 65 are
# 75% likely to get heart attack in the next 10 years. When we get some new data
# (new person - John who's 71) - we can make a prediction that he will get a
# heart attack in the next 10 years too.
# An interesting prediction: scientists predict that 50% of people alive
# today will suffer from cancer in their lifetime. It's purely because we live
# longer, and our immune system weakens as we get older.
# Although we didn't get far from the start we already managed to hit one of the
# biggest challenges in this domain. Given historical data we assume that the
# same pattern will exist in the future. "Assume" is not good enough in this
# case. There is a mathematical model - called VC theory (from initials of its
# authors, Vapnik and Chervonenkis.) This theory explains what, from
# mathematical standpoint, it means "to learn" and builds a foundation for the
# entire statistical machine learning domain. In here we are only going to
# scratch the surface. For more details check Caltech course (see references.)
# In order to be sure that discovered pattern exists in future data, we are
# going to validate our model each time. How we validate is largely dictated
# by the VC theory. You can think of it in this way - adhering to the theory is
# like a warranty. If you follow basic steps/rules of thumb you can be sure that
# what you learned from your data (=discovered patterns) is valid.
# 3. Example of a Model
# ---------------------
# To get a better intuition let's investigate a very simple example. Back in the
# days, banks used to manually process credit requests. People used to file
# application in which they were stating their age, obtained education, current
# salary, years in residence, outstanding debt and many more. Next, bank clerk
# was reading it, evaluating it, and giving his decision - whether to extend a
# credit to that customer or not.
# Our job is to write a program which can do clerk's job automatically - it
# reads the application and makes the decision - whether to extend the credit or
# deny. Moreover, it should do this job as good as human clerk.
# We know, that bank has already processed number of applications over the
# years, and we could use them in order to learn (identify historical patterns.)
# As you can imagine applications which bank processed in the past are just a
# fraction of all possible applications that people can write. The question is -
# if we identify some pattern in our historical applications, what are the
# chances that this pattern holds in future applications (the ones we haven't
# seen yet.) This question is answered by Hoeffding's inequality (which leads to
# VC theory.) But, we are not going to dig any deeper here.
# Instead, think of it in this way. We have our 10,000 applications. For each
# application we have the decision that was made (to extend or deny a credit
# line.) We are going to split our data (10,000 applications) into 2 groups:
# - training data (80%)
# - testing data (20%)
# The proportions (80-20) are derived from mathematical formulas, but in most
# of the cases the 80-20 split can be applied as a rule of thumb and it gives a
# good generalization. Generalization means that pattern(s) identified in seen
# data will be present in the unseen (new) data too.
# Now, we split data for a purpose. In order to learn patterns we are going to
# use the training data only. We don't snoop onto the testing data at any point.
# The purpose is to simulate realistic learning environment as much as
# possible. After identifying the pattern (=learning) we want to find out how in
# realistic scenario this pattern will work. This is the reason why we left out
# the testing data (20% of applications) outside learning. For each application
# in this set we know the final outcome - extended or denied credit line. We
# can use our model (learned on 80% of remaining applications) and try to guess
# whether the credit, according to out model, should be extended or denied. Next
# we compare with actual decisions that were made to see how well our model
# approximates decisions made by human-clerks. This is called model evaluation
# (some also call it validation.)
# We jumped straight to the models - just to be sure we're on the same page - a
# simple model could be something like if the applicant is in their 20s, doesn't
# have outstanding debt and worked for at least 5 years, extend credit,
# otherwise deny. As you can imagine, there could be more complicated rules -
# this is purely for illustration purpose.
# 3.1. When We Should Apply Machine Learning?
# -------------------------------------------
# The paragraph above describes a very simplistic model. In some cases these
# kinds of rules are well defined - there's no point in using machine learning
# techniques to uncover these patterns as they are already known. We only want
# to use these techniques to discover patterns which we are not able to pin
# down mathematically.
# 3.2. Prerequisites For Machine Learning?
# ----------------------------------------
# Next question we have to ask ourselves is - when machine learning is really
# applicable? Can every pattern be discovered by those algorithms - NO. Machine
# learning relies on learning from data, so if we don't have historical data we
# are out of business at the very beginning (also when we have small volume.)
# Next we have to ask ourselves - whether the pattern exists in the data. Say we
# have data of someone flipping the coin 10,000 times and recording whether
# they flipped head or tail each time. If we'll try to predict what are they
# going to flip next - we (our algorithm) might discover some patterns but are
# they realistic? Hopefully this will be caught during our model evaluation.
# And at the end, we are not able to pin the pattern down mathematically.
# Otherwise why bother, right?
# To recap:
# - we have data
# - the pattern exists
# - we cannot pin it down mathematically
# 4. Significance of the 80-20 Rule
# ---------------------------------
# One could ask - surely you've got some good algorithms, why evaluate them in
# this case. Someone checked them before publishing, right?
# The algorithm might be good, but its results (identified patterns) rely on the
# data, as pattern is inherently in the data - the method is only used to
# discover it. If you have a rubbish data - you will get a rubbish (=useless)
# pattern. And this is the point when you want to know - whether what you just
# learned has a value.
# Another angle to look at this problem is a bit more extreme. Let's say that
# our model sort of memorized all learning data - it's like you would memorize
# all the answers from the textbook without knowing how to calculate them. When
# you're checking your results with answers at the back of the book you're
# getting everything perfectly - well, you memorized it perfectly. However, when
# you arrive in the exam - it's not so great any more. Pretty much the same can
# happen in machine learning.
# 5. Overfitting
# --------------
# Above example (the extreme case) is closely related to overfitting (you will
# hear it a lot in machine learning.) To give you a better intuition what it is
# let's say we have group of people which are characterized by their age and the
# average volume of milk they consume: (20yo, 1 gallon), (34yo, 0.2 gallons)
# and so on. We
# can plot this data on the scatter plot, and next we can provide best fit line.
# If we are going to use a linear model there will be some errors (usually we
# would apply mean squared error in this case.) However, if we would use higher
# order polynomial to fit the data we would get better error rate (=lower).
# On a side note - this is the main purpose of machine learning. You create an
# error function (mean of squared errors from every point, for example) and then
# the job of the algorithm is to minimize its value by tweaking parameters of
# the model. When you hit error level that is acceptable to you, you get your
# model and report its all parameters as your trained model - ready to be
# evaluated on a testing data.
# You can think of it as calculating gradient of error function over each
# parameter and equating it to zero, and eventually finding minimum of
# hyperplane. If it doesn't make any sense to you, just ignore this paragraph.
# Coming back to best fit function - linear model might not be as good as
# 55-order polynomial - judging by the error it generates. However, there's a
# catch. Data is rarely clean - it's almost always noisy. If you use high order
# polynomial you most likely learning the noise rather than the general pattern
# that occurs in your data.
# If you think you don't have noisy data... you're the luckiest person on Earth,
# or you better think some more.
# To summarize it, overfitting model generalize poorly.
# The obvious questions one can ask - how can I know if I'm overfitting, and is
# there a way to prevent this from happening? The answer is regularization.
# 6. Regularization
# -----------------
# Overfitting, by and large, happens because we use model which is too complex
# for our data. Basically, the job of regularization is to reduce complexity of
# the model - rather than using 8-order polynomial, it encourages to use lower
# order, say 3.
# We can achieve this in 2 ways.
# - by reducing number of independent variables in our training data set,
# - by applying various smoothers in our models.
# 6.1. Reducing variables
# -----------------------
# The more variables we have (columns in our training data set when we look at
# it as a grid) the more challenging it becomes to fit them well. Sometimes it
# might be the case, that with fewer variables we are able to create a model
# which has lower complexity and fits data at acceptable rate.
# This is also called removing VC dimensions, which is derived from VC theory
# mentioned earlier. One reason you would like to do it, is the more variables
# you use, the more training data you need to have in order to be sure that
# whatever model you created is generalizing - creating any model from 10,000
# variables is easy. The problem is finding enough data which will guarantee you
# that whatever you learned has any value. To make it more concrete example -
# from 10,000 variables you derived model which predicts likelihood of getting
# heart attack. However, your predicted probability has error margin - in this
# case it's for example 44%. So when you predicting that someone will get heart
# attack with 56%, what you actually say, they will get heart attack with 52%
# +/- 44% which gives you a range of 8% - 96% where your actual likelihood lies.
# Yup, you are right - this prediction is utterly useless, as it pretty much
# says that everything can happen.
# 6.2. Applying Smoothing Techniques
# ----------------------------------
# In layman terms there are number of techniques, depending on your learning
# algorithm which can "discourage" your model from getting too complex. If you
# think of hyperplane created by high order polynomial - you could observe loads
# of local minimums/maximums. Smoothing aims to reduce those spikes - smooth
# them out so they are closer to the general surface of your hyperplane.
# There are many regularizors available, just to name few:
# - Tikhonov regularization
# - LASSO (which stands for least absolute shrinkage and selection operator)
# They are used in different situations, and their aim is to increase
# generalization by parameters shrinkage, and some also variable selection. In
# other words it makes some variables contribute less to final prediction, and
# in some techniques can also remove non-influential variables.
# 7. Cross Validation
# -------------------
# Suppose you have data of some patients, and you would like to predict what's
# the likelihood they will die in the next 5 years. You've got plenty of
# variables - their age, sex, medical history, etc. Your job is to find a
# pattern and report how accurate it is.
# As you know there are plenty of algorithms which can learn from data (e.g.
# decision trees, neural networks, even perceptron.)
# Each algorithm can identify different pattern, and you would only like to use
# the pattern which gives you the best predictions. So at best, you would like
# to run all the algorithms, and evaluate all learned models, compare them with
# each other and report only the one which has the highest accuracy (we'll talk
# about measurements in the next lecture.)
# According to what we have learned so far, we would split our data into 2 sets:
# training set and testing set. Then we would use training set to generate a
# model using first algorithm, evaluate using testing set. Next we would do the
# same with 2nd, 3rd and all other algorithms. Do you see any problem with that?
# The problem is that our testing set became part of our learning model, because
# it was used to evaluate all models. In other words - we trained many models
# and in the end selected one which works best for our testing data. So testing
# data not only provides the information how good it is, but also guides our
# final decision (and that's wrong!) Well, not categorically wrong, but you
# provide an optimistic accuracy of your model - which is not really something
# you should do.
# To overcome this, we use cross validation. How does it work? At first we split
# our data (at random) into:
# - testing set (20%)
# - training set (80%)
# From now on, we are only going to play with the training set. We take our training
# set and split it, at random, into 2 subsets:
# - validation (20% of all the data)
# - (sub)training (60% of all the data)
# We use our first algorithm to learn (train the model) from those 60%. Next, we
# evaluate the model on validation set (20%). We write down how well it did.
# Next we again take our training set (80%) and again split it, at random, into
# a validation set (20%) and a subtraining set (60%). On the new subtraining set we run
# our 2nd algorithm, which trains another model, then we evaluate this model
# against validation set (20%), and save its performance result.
# We do that for all other algorithms we want to run. At the end, we pick the
# one which performed best, and run this model against testing data (20%) which
# hasn't been used so far in our experiment. It gives us some accuracy, and this
# is something we report. We provide this model, but we say it's accurate and
# here we provide what we measured against testing data.
# In this way testing data is only used once, and doesn't guide selection of
# the model. To look at it from the other angle - you're trying to simulate
# realistic learning process, and you want to know how well you are learning,
# not just report a high accuracy.
# The above scenario is a bit simplistic view, sometimes, you can run the same
# model multiple times on different validation sets, and average errors. You
# could write so much more about on this topic.
# 7.1. Other Techniques
# ---------------------
# Cross validation is not the only approach. As we alluded in the previous
# section this subject was heavily researched over the last years, and people
# came up with number of methods. Usually each method comes with its own
# trade-offs and they should guide your final decision, which one you should
# use.
# For instance another widely used method is K-fold validation. However it comes
# at a price. You as a user have to decide which K you would like to use. This
# decision has following impact on your model:
# - larger K leads to smaller bias and larger variance
# - smaller K leads to larger bias and smaller variance
# 8. Closing Remarks
# ------------------
# I feel like I'm committing a crime here by not introducing any maths. For more
# comprehensive introduction to machine learning please check Caltech course
# mentioned in references.
# If you want to be serious about Data Science, I think it's pretty important to
# have a very good understanding of the basics of machine learning, as it will
# give you more confidence in your work.
# Also, very often we are not only interested in getting a prediction as "yes"
# or "no". Sometimes business will demand that our analyses are easily
# interpretable and maybe even scalable.
# Interpretable analyses - when we provide a prediction that this patient will
# die, or not - our client who requested this analyses would like to know why
# this patient is expected to die because maybe there's something we can do
# extra to change it. And this is something where knowledge about different
# algorithms (also how they work) comes at play - some will be easy to interpret
# like decision tree, some a bit harder like neural networks or SVMs (although
# I believe you can still do it, it's just harder.)
# Scalability is about addressing problem of large volume of data. If you have
# inefficient algorithms it will become very slow when you drastically increase
# the volume of training data. This is very often the case of regularization
# algorithms, which rely on quadratic programming.
# In the next lecture we'll do some practical machine learning using caret
# package. | /lectures/10 Lecture 9 - Introduction to Machine Learning (Theory).R | no_license | DrRoad/Data-science-tutorial | R | false | false | 18,508 | r | # Lecture 7 - Introduction to Machine Learning
# ============================================
# 1. Introduction
# ---------------
# In the previous lectures we learned about basic mechanics of R. Knowledge in
# itself is almost never a goal, but the application of knowledge. In this
# lecture we will provide overview of machine learning concept and in subsequent
# lectures we will show how R can be used to build a model predicting who will
# survive Titanic disaster, and maybe some other stuff too.
# 2. What's a Prediction?
# -----------------------
# To kick things off, first, we have to ask a very basic question - what's a
# prediction? A prediction is a guess which is basing its rationale on patterns
# discovered in historical data. In layman terms - we have historical data, and
# we identify some patterns. Then we get some new data that we have never seen
# before and we expect that the same pattern will exist there too. We apply our
# previously identified pattern to new data, and derive predictions on that
# basis.
# For instance we discovered in our historical records that people over 65 are
# 75% likely to get heart attack in the next 10 years. When we get some new data
# (new person - John who's 71) - we can make a prediction that he will get a
# heart attack in the next 10 years too.
# An interesting prediction: scientists predict that 50% of people alive
# today will suffer from cancer in their lifetime. It's purely because we live
# longer, and our immune system weakens as we get older.
# Although we didn't get far from the start we already managed to hit one of the
# biggest challenges in this domain. Given historical data we assume that the
# same pattern will exist in the future. "Assume" is not good enough in this
# case. There is a mathematical model - called VC theory (from initials of its
# authors, Vapnik and Chervonenkis.) This theory explains what, from
# mathematical standpoint, it means "to learn" and builds a foundation for the
# entire statistical machine learning domain. In here we are only going to
# scratch the surface. For more details check Caltech course (see references.)
# In order to be sure that discovered pattern exists in future data, we are
# going to validate our model each time. How we validate is largely dictated
# by the VC theory. You can think of it in this way - adhering to the theory is
# like a warranty. If you follow basic steps/rules of thumb you can be sure that
# what you learned from your data (=discovered patterns) is valid.
# 3. Example of a Model
# ---------------------
# To get a better intuition let's investigate a very simple example. Back in the
# days, banks used to manually process credit requests. People used to file
# application in which they were stating their age, obtained education, current
# salary, years in residence, outstanding debt and many more. Next, bank clerk
# was reading it, evaluating it, and giving his decision - whether to extend a
# credit to that customer or not.
# Our job is to write a program which can do clerk's job automatically - it
# reads the application and makes the decision - whether to extend the credit or
# deny. Moreover, it should do this job as good as human clerk.
# We know, that bank has already processed number of applications over the
# years, and we could use them in order to learn (identify historical patterns.)
# As you can imagine applications which bank processed in the past are just a
# fraction of all possible applications that people can write. The question is -
# if we identify some pattern in our historical applications, what are the
# chances that this pattern holds in future applications (the ones we haven't
# seen yet.) This question is answered by Hoeffding's inequality (which leads to
# VC theory.) But, we are not going to dig any deeper here.
# Instead, think of it in this way. We have our 10,000 applications. For each
# application we have the decision that was made (to extend or deny a credit
# line.) We are going to split our data (10,000 applications) into 2 groups:
# - training data (80%)
# - testing data (20%)
# The proportions (80-20) are derived from mathematical formulas, but in most
# of the cases the 80-20 split can be applied as a rule of thumb and it gives a
# good generalization. Generalization means that pattern(s) identified in seen
# data will be present in the unseen (new) data too.
# Now, we split data for a purpose. In order to learn patterns we are going to
# use the training data only. We don't snoop onto the testing data at any point.
# The purpose is to simulate realistic learning environment as much as
# possible. After identifying the pattern (=learning) we want to find out how in
# realistic scenario this pattern will work. This is the reason why we left out
# the testing data (20% of applications) outside learning. For each application
# in this set we know the final outcome - extended or denied credit line. We
# can use our model (learned on 80% of remaining applications) and try to guess
# whether the credit, according to out model, should be extended or denied. Next
# we compare with actual decisions that were made to see how well our model
# approximates decisions made by human-clerks. This is called model evaluation
# (some also call it validation.)
# We jumped straight to the models - just to be sure we're on the same page - a
# simple model could be something like if the applicant is in their 20s, doesn't
# have outstanding debt and worked for at least 5 years, extend credit,
# otherwise deny. As you can imagine, there could be more complicated rules -
# this is purely for illustration purpose.
# 3.1. When We Should Apply Machine Learning?
# -------------------------------------------
# Paragraph above describes very simplistic model. In some cases this kind of
# rules are well defined - there's no point of using machine learning techniques
# to uncover this patterns as they are already known. We only want to use this
# techniques to discover patterns which we are not able to pin down
# mathematically.
# 3.2. Prerequisites For Machine Learning?
# ----------------------------------------
# Next question we have to ask ourselves is - when machine learning is really
# applicable? Can every pattern be discovered by those algorithms - NO. Machine
# learning relies on learning from data, so if we don't have historical data we
# are out of business at the very beginning (also when we have small volume.)
# Next we have to ask ourselves - whether the pattern exists in the data. Say we
# have data of someone flipping the coin 10,000 times and recording whether
# they flipped head or tail each time. If we'll try to predict what are they
# going to flip next - we (our algorithm) might discover some patterns but are
# they realistic? Hopefully this will be caught during our model evaluation.
# And at the end, we are not able to pin the pattern down mathematically.
# Otherwise why bother, right?
# To recap:
# - we have data
# - the pattern exists
# - we cannot pin it down mathematically
# 4. Significance of the 80-20 Rule
# ---------------------------------
# One could ask - surely you've got some good algorithms, why evaluate them in
# this case. Someone checked them before publishing, right?
# The algorithm might be good, but its results (identified patterns) rely on the
# data, as pattern is inherently in the data - the method is only used to
# discover it. If you have a rubbish data - you will get a rubbish (=useless)
# pattern. And this is the point when you want to know - whether what you just
# learned has a value.
# Another angle to look at this problem is a bit more extreme. Let's say that
# our model sort of memorized all learning data - it's like you would memorize
# all the answers from the textbook without knowing how to calculate them. When
# you're checking your results with answers at the back of the book you're
# getting everything perfectly - well, you memorized it perfectly. However, when
# you arrive in the exam - it's not so great any more. Pretty much the same can
# happen in machine learning.
# 5. Overfitting
# --------------
# Above example (the extreme case) is closely related to overfitting (you will
# hear it a lot in machine learning.) To give you a better intuition what it is
# let's say we have group of people which are characterized by their age and the
# average volume of milk they consume. (20yo, 1galon), (34yo, .2g) and so on. We
# can plot this data on the scatter plot, and next we can provide best fit line.
# If we are going to use a linear model there will be some errors (usually we
# would apply mean squared error in this case.) However, if we would use higher
# order polynomial to fit the data we would get better error rate (=lower).
# On a side note - this is the main purpose of machine learning. You create an
# error function (mean of squared errors from every point, for example) and then
# the job of the algorithm is to minimize its value by tweaking parameters of
# the model. When you hit error level that is acceptable to you, you get your
# model and report its all parameters as your trained model - ready to be
# evaluated on a testing data.
# You can think of it as calculating gradient of error function over each
# parameter and equating it to zero, and eventually finding minimum of
# hyperplane. If it doesn't make any sense to you, just ignore this paragraph.
# Coming back to best fit function - linear model might not be as good as
# 55-order polynomial - judging by the error it generates. However, there's a
# catch. Data is rarely clean - it's almost always noisy. If you use high order
# polynomial you most likely learning the noise rather than the general pattern
# that occurs in your data.
# If you think you don't have noisy data... you're the luckiest person on Earth,
# or you better think some more.
# To summarize it, overfitting model generalize poorly.
# The obvious questions one can ask - how can I know if I'm overfitting, and is
# there a way to prevent this from happening? The answer is regularization.
# 6. Regularization
# -----------------
# Overfitting, by and large, happens because we use model which is too complex
# for our data. Basically, the job of regularization is to reduce complexity of
# the model - rather than using 8-order polynomial, it encourages to use lower
# order, say 3.
# We can achieve this in 2 ways.
# - by reducing number of independent variables in our training data set,
# - by applying various smoothers in our models.
# 6.1. Reducing variables
# -----------------------
# The more variables we have (columns in our training data set when we look at
# it as a grid) the more challenging it becomes to fit them well. Sometimes it
# might be the case, that with fewer variables we are able to create a model
# which has lower complexity and fits data at acceptable rate.
# This is also called removing VC dimensions, which is derived from VC theory
# mentioned earlier. One reason you would like to do it, is the more variables
# you use, the more training data you need to have in order to be sure that
# whatever model you created is generalizing - creating any model from 10,000
# variables is easy. The problem is finding enough data which will guarantee you
# that whatever you learned has any value. To make it more concrete example -
# from 10,000 variables you derived model which predicts likelihood of getting
# heart attack. However, your predicted probability has error margin - in this
# case it's for example 44%. So when you predicting that someone will get heart
# attack with 56%, what you actually say, they will get heart attack with 52%
# +/- 44% which gives you a range of 8% - 96% where your actual likelihood lies.
# Yup, you are right - this prediction is utterly useless, as it pretty much
# says that everything can happen.
# 6.2. Applying Smoothing Techniques
# ----------------------------------
# In layman terms there are number of techniques, depending on your learning
# algorithm which can "discourage" your model from getting too complex. If you
# think of hyperplane created by high order polynomial - you could observe loads
# of local minimums/maximums. Smoothing aims to reduce those spikes - smooth
# them out so they are closer to the general surface of your hyperplane.
# There are many regularizors available, just to name few:
# - Tikhonov regularization
# - LASSO (which stands for least absolute shrinkage and selection operator)
# They are used in different situations, and their aim is to increase
# generalization by parameters shrinkage, and some also variable selection. In
# other words it makes some variables contribute less to final prediction, and
# in some techniques can also remove non-influential variables.
# 7. Cross Validation
# -------------------
# Suppose you have data of some patients, and you would like to predict what's
# the likelihood they will die in the next 5 years. You've got plenty of
# variables - their age, sex, medical history, etc. Your job is to find a
# pattern and report how accurate it is.
# As you know there are plenty of algorithms which can learn from data (e.g.
# decision trees, neural networks, even perceptron.)
# Each algorithm can identify different pattern, and you would only like to use
# the pattern which gives you the best predictions. So at best, you would like
# to run all the algorithms, and evaluate all learned models, compare them with
# each other and report only the one which has the highest accuracy (we'll talk
# about measurements in the next lecture.)
# According to what we have learned so far, we would split our data into 2 sets:
# training set and testing set. Then we would use training set to generate a
# model using first algorithm, evaluate using testing set. Next we would do the
# same with 2nd, 3rd and all other algorithms. Do you see any problem with that?
# The problem is that our testing set became part of our learning model, because
# it was used to evaluate all models. In other words - we trained many models
# and in the end selected the one which works best for our testing data. So testing
# data not only provides the information how good it is, but also guides our
# final decision (and that's wrong!) Well, not categorically wrong, but you
# provide an optimistic accuracy of your model - which is not really something
# you should do.
# To overcome this, we use cross validation. How does it work? At first we split
# our data (at random) into:
# - testing set (20%)
# - training set (80%)
# From now on, we are only going to play with the training set. We take our training
# set and split it, at random, into 2 subsets:
# - validation (20% of all the data)
# - (sub)training (60% of all the data)
# We use our first algorithm to learn (train the model) from those 60%. Next, we
# evaluate the model on validation set (20%). We write down how well it did.
# Next we take again our training set (80%) and again split it, at random, into a
# validation set (20%) and a (sub)training set (60%). On the new (sub)training set we run
# our 2nd algorithm, which trains another model, then we evaluate this model
# against validation set (20%), and save its performance result.
# We do that for all other algorithms we want to run. At the end, we pick the
# one which performed best, and run this model against testing data (20%) which
# hasn't been used so far in our experiment. It gives us some accuracy, and this
# is something we report. We provide this model and state how accurate it is,
# quoting what we measured against the testing data.
# In this way testing data is only used once, and doesn't guide selection of
# the model. To look at it from the other angle - you're trying to simulate
# realistic learning process, and you want to know how well you are learning,
# not just report a high accuracy.
# The above scenario is a bit simplistic view, sometimes, you can run the same
# model multiple times on different validation sets, and average errors. You
# could write so much more about on this topic.
# 7.1. Other Techniques
# ---------------------
# Cross validation is not the only approach. As we alluded in the previous
# section this subject was heavily researched over the last years, and people
# came up with number of methods. Usually each method comes with its own
# trade-offs and they should guide your final decision, which one you should
# use.
# For instance another widely used method is K-fold validation. However it comes
# at a price. You as a user have to decide which K you would like to use. This
# decision has following impact on your model:
# - larger K leads to smaller bias and larger variance
# - smaller K leads to larger bias and smaller variance
# 8. Closing Remarks
# ------------------
# I feel like I'm committing a crime here by not introducing any maths. For more
# comprehensive introduction to machine learning please check Caltech course
# mentioned in references.
# If you want to be serious about Data Science, I think it's pretty important to
# have a very good understanding of the basics of machine learning, as it will
# give you more confidence in your work.
# Also, very often we are not only interested in getting a prediction as "yes"
# or "no". Sometimes business will demand that our analyses are easily
# interpretable and maybe even scalable.
# Interpretable analyses - when we provide a prediction that this patient will
# die, or not - our client who requested this analyses would like to know why
# this patient is expected to die because maybe there's something we can do
# extra to change it. And this is something where knowledge about different
# algorithms (also how they work) comes at play - some will be easy to interpret
# like decision tree, some a bit harder like neural networks or SVMs (although
# I believe you can still do it, it's just harder.)
# Scalability is about addressing problem of large volume of data. If you have
# inefficient algorithms it will become very slow when you drastically increase
# the volume of training data. This is very often the case of regularization
# algorithms, which rely on quadratic programming.
# In the next lecture we'll do some practical machine learning using caret
# package. |
#Name: Anbarasan
#StudentID: 1508153
#' Count complete observations per air-pollution monitor.
#'
#' @param directory Path to the folder holding the monitor CSV files.
#'   Files are matched to monitor IDs positionally (sorted listing),
#'   which assumes the 001.csv ... 332.csv naming convention.
#' @param id Integer vector of monitor IDs, each expected in 1:332.
#' @return A data frame with columns `id` and `nobs` (number of rows
#'   with no missing value for that monitor), or `NA` when any id is
#'   out of range.
complete <- function(directory, id=1:332)
{
## directory is location of the csv files
## id is the monitor ID number to be used
## Return a data frame of the form:
## id nobs
## 1 117
## 2 1047
## ..
## where 'id' is the monitor ID number and 'nobs' is the number of complete cases
# Range check: keeps the historical contract of printing a message and
# returning NA (instead of stopping) on invalid ids.
if(1 > min(id) || 332 < max(id)) {
print(paste("Error: id is out of range."))
return(NA)
}
# Sorted full paths of every file in the directory; indexed by id below.
Directory_File <-list.files(directory,full.names = TRUE)
# Result vector: one complete-case count per requested monitor.
# NOTE(review): grown inside the loop rather than preallocated.
v <-vector()
# For each requested monitor, read its CSV and count the rows that
# contain no NA values.
for(i in 1:length(id))
{
# read.csv() reads the file in table format and creates a data frame;
# the extra c() coerces it to a plain list of columns (unnecessary).
records <-c(read.csv(Directory_File[id[i]]))
# complete.cases(): logical vector, TRUE for rows with no missing values.
v[i] <-sum(complete.cases(records))
}
# Assemble the id/nobs result table (closing brace follows on the next line).
final_data <-data.frame(id,nobs=v)
return(final_data)
} | /Part A/complete.R | no_license | anbarisker/datascienceunitec | R | false | false | 1,166 | r | #Name: Anbarasan
#StudentID: 1508153
#' Count complete observations per air-pollution monitor.
#'
#' Reads one CSV file per requested monitor from `directory` and counts the
#' rows that contain no missing values.  Files are matched to monitor IDs
#' positionally: the sorted file listing is assumed to follow the
#' 001.csv ... 332.csv naming convention.
#'
#' @param directory Path to the folder that holds the monitor CSV files.
#' @param id Integer vector of monitor IDs, each expected in 1:332.
#' @return A data frame with columns `id` and `nobs` (count of complete
#'   cases for that monitor), or `NA` (with a warning) when any id is
#'   out of range.
complete <- function(directory, id = 1:332) {
# Validate the requested range up front; keep the historical contract of
# returning NA (rather than stopping) on bad input, but report via warning()
# so the message goes to stderr instead of being print()ed.
if (min(id) < 1 || max(id) > 332) {
warning("Error: id is out of range.")
return(NA)
}
csv_files <- list.files(directory, full.names = TRUE)
# Preallocate the result instead of growing a vector inside the loop;
# seq_along() is safe for a zero-length `id` (unlike 1:length(id)).
nobs <- integer(length(id))
for (i in seq_along(id)) {
# read.csv() already returns a data frame; no c() coercion is needed.
records <- read.csv(csv_files[id[i]])
# complete.cases(): TRUE for rows with no missing values.
nobs[i] <- sum(complete.cases(records))
}
data.frame(id, nobs = nobs)
}
# ------------------------------------------------------------------------
# Yearly Hargreaves potential evapotranspiration (PET) metrics from CRU TS
# climate rasters, accumulated along the flow network with TauDEM aread8,
# per continent and per water year (Oct-Sep).  Outputs one accumulated
# GeoTIFF per year under <folder.store>/<prefix><metric>/<continent>/.
# Heavy external dependencies: GDAL command-line utilities, TauDEM + MPI,
# group file-server paths, and project helpers sourced from
# functions_generic.R (e.g. dir_()).
# ### TO MODIFY: paths and CRU file names below.
# ------------------------------------------------------------------------
library(raster); library(foreign); library(rgdal); library(gdalUtils)
# Project roots on the group file server.
wd <- '/vol/milkun3/Valerio/FLO1K_1.1/'
wd.old <- '/vol/milkun3/Valerio/FLO1K/'
# dir_() is a project helper (presumably create-if-missing and return the
# path -- confirm in functions_generic.R).
source(paste0(wd,'scripts/functions_generic.R'))
folder.store = dir_(paste0(wd,"/COVAR.DYN.ACC/"))
folder.temp = dir_(paste0(wd,"/COVAR.DYN.ACC/CRU.temp.pet/"))
folder.cru = paste0(wd.old,'/COVAR.DYN.ACC/CRU/')
folder.solrad = paste0(wd.old,'/COVAR.DYN.ACC/ET_SolRad/')
folder.flow.dir = paste0("/vol/milkun8/Valerio/HYDROGRAPHY/DIR_taudem/")
# CRU TS NetCDF inputs: monthly mean temperature (tmp) and diurnal
# temperature range (dtr).  Modify with the name of the CRU TS file in use.
file.tmp = 'cru_ts3.24.01.1901.2015.tmp.dat.nc'
file.dtr = 'cru_ts3.24.01.1901.2015.dtr.dat.nc'
# Prefix for the output metric folders (e.g. "eav", "esi").
prefix = 'e'
# Time interval to process; the CRU series itself starts in 1901.
start_year = 1960
end_year = 2015
database_start_year = 1901
#specify variable name as in the CRU database
var_tmp = 'tmp'
var_dtr = 'dtr'
##################################################################################################################################
##################################################################################################################################
# One-off: aggregate the monthly ET_SolRad radiation grids to 0.5 deg
# cells (native resolution is 0.025 deg); skipped if already done.
folder.solrad.aggr = paste0(folder.solrad,'HalfDeg/')
if (!dir.exists(folder.solrad.aggr)){
dir.create(file.path(folder.solrad.aggr))
for(i in 1:12){
# Monthly radiation grid i (source files carry no extension).
r = readGDAL(paste0(folder.solrad,'et_solrad_',i))
name_store = paste0(folder.solrad.aggr,i,'_temp.tif')
name_res = paste0(folder.solrad.aggr,i,'.tif')
writeGDAL(r,name_store)
# Block-average down to 0.5 deg cells, then drop the temporary.
gdal_translate(name_store,name_res,ot="Float32",of='GTiff',tr = c(0.5,0.5),r='average')
file.remove(name_store)
}
}
# Index of the first CRU band to read, e.g. 12*(1981-1901)+1 = 961.
start_month = 12*(start_year - database_start_year)+1-3 #-3 shifts to October so each "year" is a water year starting in the previous calendar year
# One entry per processed year; nseq holds the first band of each
# 12-month window.
yearseq = start_year:end_year
nseq = seq(start_month,(start_month+length(yearseq)*12-12),12)
days2month = c(31,30,31,31,28,31,30,31,30,31,31,30) #days in each water year month (Oct, Nov, Dec, Jan, ...)
solrad_seq = c(10:12,1:9) #calendar months reordered to the water year (Oct-Sep)
# Main loop over the NetCDF stack: one iteration per water year.
for(i in 1:length(yearseq)){
pet_list = list()
for(j in 1:12){
#CRU monthly mean temperature [C], band nseq[i]+(j-1)
Tmean = raster(paste0(folder.cru,file.tmp), varname=var_tmp, band = (nseq[i]+(j-1)))
#CRU diurnal temperature range [C]
TD = raster(paste0(folder.cru,file.dtr), varname=var_dtr, band = (nseq[i]+(j-1)))
#Solar Radiation [mm/day] --> scaled to mm/month via days2month
RA = raster(paste0(folder.solrad.aggr,solrad_seq[j],'.tif'),
crs=crs(Tmean))*days2month[j]
#Hargreaves PET formulation (monthly PET)
pet_list[[j]] = 0.0023*RA*(Tmean + 17.8)*TD**0.5
}
# NOTE(review): `sum` shadows base::sum from here on (harmless for
# function calls but confusing); same for `var` and loop index `c` below.
sum = Reduce('+',pet_list)
sum[sum < 0] <- 0 #important: there are some negative values at high latitudes
#average monthly PET over the year
av = sum/12
#seasonality index (si): summed absolute deviation relative to the total
si = Reduce('+',lapply(pet_list,function(x) abs(x-av)))/sum
#metrics computed for the current year, fetched by name via get() below
var_metrics = c('av','si')
#flow-direction grids are stored per continent
continents = c('af','as','au','eu','na','saca')
for(c in 1:length(var_metrics)){
var = get(var_metrics[c])
#write the metric as float, then resample to 0.0083333333 deg cells
#(nearest neighbour) to line up with the flow-direction rasters
var_store = var
name_store = paste0(folder.temp,var_metrics[c],'.tif')
name_res = paste0(folder.temp,var_metrics[c],'_res.tif')
writeRaster(var_store,name_store,format = 'GTiff',datatype='FLT4S', overwrite=TRUE)
gdal_translate(name_store,name_res,ot="Float32",of='GTiff',tr = c(0.0083333333,0.0083333333),r='nearest')
storepath_var <- paste0(folder.store,prefix,var_metrics[c],"/")
#create folder if it doesn't exist
if (!dir.exists(storepath_var)){
dir.create(file.path(storepath_var))
}
for(n in 1:length(continents)){
storepath_cont <- paste0(storepath_var,continents[n],"/")
#create folder if it doesn't exist
if (!dir.exists(storepath_cont)){
dir.create(file.path(storepath_cont))
}
#flow-direction raster and its tile-index shapefile (built once, cached)
name_fd = paste0(folder.flow.dir,continents[n],'_dir.tif')
name_index = paste0(folder.temp,continents[n],'_index.shp')
name_res_cont = paste0(folder.temp,continents[n],'_',var_metrics[c],'.tif')
if(!file.exists(name_index)) gdaltindex(name_index,name_fd)
#crop/reproject the global raster with GDAL to the extent of the fd raster
gdalwarp(name_res,name_res_cont,cutline=name_index,t_srs = crs(raster(name_fd)),
crop_to_cutline = TRUE,overwrite = TRUE)
name_acc = paste0(storepath_cont,yearseq[i],'.tif')
#weighted flow accumulation with TauDEM aread8 over 20 MPI processes;
#-wg supplies the PET grid as cell weights (-nc: see TauDEM docs)
system(
paste0(
'mpiexec -n 20 /opt/Taudem5/bin/aread8 -p ',
name_fd,' -ad8 ',name_acc,' -wg ',name_res_cont,' -nc'
)
)
}
#delete per-metric temporaries before the next metric
file.remove(
c(name_store,
name_res,
paste0(folder.temp,continents,'_',var_metrics[c],'.tif'))
)
}
# (the closing brace of the year loop follows on the next line)
} | /scripts/covar.dyn.CRU.pet.R | no_license | vbarbarossa/flo1k | R | false | false | 5,317 | r | ### TO MODIFY ####################################################################################################################
# ------------------------------------------------------------------------
# Yearly Hargreaves potential evapotranspiration (PET) metrics from CRU TS
# climate rasters, accumulated along the flow network with TauDEM aread8,
# per continent and per water year (Oct-Sep).  One accumulated GeoTIFF is
# written per year under <folder.store>/<prefix><metric>/<continent>/.
# External dependencies: GDAL utilities, TauDEM + MPI, group file-server
# paths, and project helpers sourced from functions_generic.R.
# ------------------------------------------------------------------------
library(raster); library(foreign); library(rgdal); library(gdalUtils)
# Project roots on the group file server.
wd <- '/vol/milkun3/Valerio/FLO1K_1.1/'
wd.old <- '/vol/milkun3/Valerio/FLO1K/'
# dir_() is a project helper (presumably create-if-missing and return the
# path -- confirm in functions_generic.R).
source(paste0(wd,'scripts/functions_generic.R'))
folder.store = dir_(paste0(wd,"/COVAR.DYN.ACC/"))
folder.temp = dir_(paste0(wd,"/COVAR.DYN.ACC/CRU.temp.pet/"))
folder.cru = paste0(wd.old,'/COVAR.DYN.ACC/CRU/')
folder.solrad = paste0(wd.old,'/COVAR.DYN.ACC/ET_SolRad/')
folder.flow.dir = paste0("/vol/milkun8/Valerio/HYDROGRAPHY/DIR_taudem/")
# CRU TS NetCDF inputs: monthly mean temperature (tmp) and diurnal
# temperature range (dtr); modify with the name of the CRU TS file in use.
file.tmp = 'cru_ts3.24.01.1901.2015.tmp.dat.nc'
file.dtr = 'cru_ts3.24.01.1901.2015.dtr.dat.nc'
# Prefix for the output metric folders (e.g. "eav", "esi").
prefix = 'e'
# Time interval to process; the CRU series itself starts in 1901.
start_year = 1960
end_year = 2015
database_start_year = 1901
#specify variable name as in the CRU database
var_tmp = 'tmp'
var_dtr = 'dtr'
##################################################################################################################################
# One-off: aggregate the monthly ET_SolRad radiation grids to 0.5 deg
# cells (native resolution is 0.025 deg); skipped if already done.
folder.solrad.aggr = paste0(folder.solrad,'HalfDeg/')
if (!dir.exists(folder.solrad.aggr)){
dir.create(file.path(folder.solrad.aggr))
for(i in 1:12){
# Monthly radiation grid i (source files carry no extension).
r = readGDAL(paste0(folder.solrad,'et_solrad_',i))
name_store = paste0(folder.solrad.aggr,i,'_temp.tif')
name_res = paste0(folder.solrad.aggr,i,'.tif')
writeGDAL(r,name_store)
# Block-average down to 0.5 deg cells, then drop the temporary.
gdal_translate(name_store,name_res,ot="Float32",of='GTiff',tr = c(0.5,0.5),r='average')
file.remove(name_store)
}
}
# Index of the first CRU band to read, e.g. 12*(1981-1901)+1 = 961.
start_month = 12*(start_year - database_start_year)+1-3 #-3 shifts to October so each "year" is a water year starting in the previous calendar year
# One entry per processed year; nseq holds the first band of each
# 12-month window.
yearseq = start_year:end_year
nseq = seq(start_month,(start_month+length(yearseq)*12-12),12)
days2month = c(31,30,31,31,28,31,30,31,30,31,31,30) #days in each water year month (Oct, Nov, Dec, Jan, ...)
solrad_seq = c(10:12,1:9) #calendar months reordered to the water year (Oct-Sep)
# Main loop over the NetCDF stack: one iteration per water year.
for(i in 1:length(yearseq)){
pet_list = list()
for(j in 1:12){
#CRU monthly mean temperature [C], band nseq[i]+(j-1)
Tmean = raster(paste0(folder.cru,file.tmp), varname=var_tmp, band = (nseq[i]+(j-1)))
#CRU diurnal temperature range [C]
TD = raster(paste0(folder.cru,file.dtr), varname=var_dtr, band = (nseq[i]+(j-1)))
#Solar Radiation [mm/day] --> scaled to mm/month via days2month
RA = raster(paste0(folder.solrad.aggr,solrad_seq[j],'.tif'),
crs=crs(Tmean))*days2month[j]
#Hargreaves PET formulation (monthly PET)
pet_list[[j]] = 0.0023*RA*(Tmean + 17.8)*TD**0.5
}
# NOTE(review): `sum` shadows base::sum from here on (harmless for
# function calls but confusing); same for `var` and loop index `c` below.
sum = Reduce('+',pet_list)
sum[sum < 0] <- 0 #important: there are some negative values at high latitudes
#average monthly PET over the year
av = sum/12
#seasonality index (si): summed absolute deviation relative to the total
si = Reduce('+',lapply(pet_list,function(x) abs(x-av)))/sum
#metrics computed for the current year, fetched by name via get() below
var_metrics = c('av','si')
#flow-direction grids are stored per continent
continents = c('af','as','au','eu','na','saca')
for(c in 1:length(var_metrics)){
var = get(var_metrics[c])
#write the metric as float, then resample to 0.0083333333 deg cells
#(nearest neighbour) to line up with the flow-direction rasters
var_store = var
name_store = paste0(folder.temp,var_metrics[c],'.tif')
name_res = paste0(folder.temp,var_metrics[c],'_res.tif')
writeRaster(var_store,name_store,format = 'GTiff',datatype='FLT4S', overwrite=TRUE)
gdal_translate(name_store,name_res,ot="Float32",of='GTiff',tr = c(0.0083333333,0.0083333333),r='nearest')
storepath_var <- paste0(folder.store,prefix,var_metrics[c],"/")
#create folder if it doesn't exist
if (!dir.exists(storepath_var)){
dir.create(file.path(storepath_var))
}
for(n in 1:length(continents)){
storepath_cont <- paste0(storepath_var,continents[n],"/")
#create folder if it doesn't exist
if (!dir.exists(storepath_cont)){
dir.create(file.path(storepath_cont))
}
#flow-direction raster and its tile-index shapefile (built once, cached)
name_fd = paste0(folder.flow.dir,continents[n],'_dir.tif')
name_index = paste0(folder.temp,continents[n],'_index.shp')
name_res_cont = paste0(folder.temp,continents[n],'_',var_metrics[c],'.tif')
if(!file.exists(name_index)) gdaltindex(name_index,name_fd)
#crop/reproject the global raster with GDAL to the extent of the fd raster
gdalwarp(name_res,name_res_cont,cutline=name_index,t_srs = crs(raster(name_fd)),
crop_to_cutline = TRUE,overwrite = TRUE)
name_acc = paste0(storepath_cont,yearseq[i],'.tif')
#weighted flow accumulation with TauDEM aread8 over 20 MPI processes;
#-wg supplies the PET grid as cell weights (-nc: see TauDEM docs)
system(
paste0(
'mpiexec -n 20 /opt/Taudem5/bin/aread8 -p ',
name_fd,' -ad8 ',name_acc,' -wg ',name_res_cont,' -nc'
)
)
}
#delete per-metric temporaries before the next metric
file.remove(
c(name_store,
name_res,
paste0(folder.temp,continents,'_',var_metrics[c],'.tif'))
)
}
}
#!/usr/bin/env Rscript
# Categorize SNPs as singleton / duplicated / diverged / lowconf / highcov
# from per-SNP summary statistics (median allele ratio, heterozygote and
# rare-homozygote proportions, Fis, coverage).  Writes
# <input>.categorized (tab-separated: Scaffold, Position, ID, Category)
# plus three diagnostic PNG scatter plots next to the input file.
#
# Usage: Rscript 03.1-snp_categorization.R <stats_file>
# The input is a whitespace-delimited table with (at least) the columns:
# ID, Scaffold, Position, MedRatio, PropHet, PropHomRare, Fis,
# MedCovHet, MedCovHom.

# Parse user input (fail early with a usage message instead of an
# obscure read.table() error; the original also started with
# rm(list=ls()), which is unnecessary in a fresh Rscript session).
args <- commandArgs(trailingOnly = TRUE)
if (length(args) < 1) {
  stop("Usage: Rscript 03.1-snp_categorization.R <stats_file>", call. = FALSE)
}
input_file <- args[1]
output_file <- paste0(input_file, ".categorized")

# Load data
data <- read.table(input_file, header = TRUE, stringsAsFactors = FALSE)
d <- data[, c("MedRatio", "PropHet", "PropHomRare", "Fis", "MedCovHet", "MedCovHom")]

# One semi-transparent plot color per category; the color doubles as the
# category marker until the Category column is derived below.
# (Variable `duplicated` shadows base::duplicated(); kept for
# compatibility with the original script.)
singleton  <- "#00000011" # black
duplicated <- "#FF000044" # red
diverged   <- "#0000FF22" # blue
lowconf    <- "#DD00AA22" # purple
highcov    <- "#00AA0088" # green
mas        <- "#FFAA0022" # orange

# All loci marked singleton before filters.  Later rules OVERWRITE
# earlier ones, so the order of the assignments below is significant.
d$Color <- singleton

# Fis too negative (absolutely, or relative to MedRatio) = duplicated
d$Color[d$Fis < -0.30] <- duplicated
d$Color[d$Fis + d$MedRatio < 0.2] <- duplicated
d$Color[d$Fis + d$MedRatio * 3 < 0.8] <- duplicated

# MedRatio extreme while almost no heterozygotes = low confidence
d$Color[d$MedRatio < 0.30 & d$PropHet < 0.05] <- lowconf
d$Color[d$MedRatio > 0.65 & d$PropHet < 0.05] <- lowconf

# Very low Fis (relative to MedRatio) or nearly all-heterozygous = diverged
d$Color[d$Fis + d$MedRatio * 3 < 0.20] <- diverged
d$Color[d$PropHet > 0.9] <- diverged

# High Fis = low confidence
d$Color[d$Fis > 0.4] <- lowconf

# Loci with high coverage
d$Color[d$MedCovHom > 60 | d$MedCovHet > 60] <- highcov

# Translate the color markers into Category labels for the next script.
# ("mas" is kept for compatibility even though no active rule assigns it.)
data$Category <- "singleton"
data$Category[d$Color == duplicated] <- "duplicated"
data$Category[d$Color == mas] <- "mas"
data$Category[d$Color == diverged] <- "diverged"
data$Category[d$Color == lowconf] <- "lowconf"
data$Category[d$Color == highcov] <- "highcov"

write.table(data[, c("Scaffold", "Position", "ID", "Category")],
            output_file, sep = "\t", quote = FALSE, row.names = FALSE)

# Report number of SNPs per category on stdout.
report <- table(data$Category)
cat("SNPs\n")  # newline so the table below starts on its own line
print(report)

# Diagnostic plots: pairwise panels, then PropHet vs MedRatio for all
# SNPs and for the remaining singletons only.
png(paste0(input_file, "_1.png"), width = 1200, height = 950)
plot(d[, 1:4], pch = 16, cex = 1, col = d$Color)
invisible(dev.off())

png(paste0(input_file, "_2.png"), width = 1200, height = 950)
plot(d$PropHet, d$MedRatio, pch = 19, cex = 1.5, col = d$Color,
     xlim = c(0, 1), ylim = c(0, 0.8))
invisible(dev.off())

single <- d[data$Category == "singleton", ]
png(paste0(input_file, "_3.png"), width = 1200, height = 950)
plot(single$PropHet, single$MedRatio, pch = 19, cex = 1.5, col = single$Color,
     xlim = c(0, 1), ylim = c(0, 0.8))
invisible(dev.off())
| /04_day4/02-CNVs/00-scripts/00-corrections_scripts/03.1-snp_categorization_correction.R | no_license | AdamStuckert/physalia_adaptation_course | R | false | false | 2,841 | r | #!/usr/bin/env Rscript
# Categorize SNPs as singleton / duplicated / diverged / lowconf / highcov
# from per-SNP summary statistics (median allele ratio, heterozygosity,
# Fis, coverage).  Writes <input>.categorized (tab-separated: Scaffold,
# Position, ID, Category) plus three diagnostic PNG scatter plots next
# to the input file.  Usage: Rscript <script> <stats_file>
# Parse user input
rm(list=ls())
args = commandArgs(trailingOnly=TRUE)
input_file = args[1]
output_file = paste0(input_file, ".categorized")
# Load data (whitespace-delimited, with a header row)
#data = read.table("03-analyses/01-snp_duplication/capelin_47006_overmerged_loci.txt", header=T, stringsAsFactors=F)
data = read.table(input_file, header=T, stringsAsFactors=F)
d = data[,c("MedRatio", "PropHet", "PropHomRare", "Fis", "MedCovHet", "MedCovHom")]
# One semi-transparent plot color per category; the color doubles as the
# category marker until the Category column is derived below.
# NOTE(review): `duplicated` shadows base::duplicated() (harmless for
# function calls, but confusing).
singleton = "#00000011" # black
duplicated = "#FF000044" # red
diverged = "#0000FF22" # blue
lowconf = "#DD00AA22" # purple
highcov = "#00AA0088" # green
mas = "#FFAA0022" # orange
# All loci marked singleton before filters; the rules below OVERWRITE
# earlier assignments, so their order is significant.
d$Color = singleton
# Fis too negative (absolutely, or relative to MedRatio) = duplicated
d$Color[d$Fis < -0.30] = duplicated
d$Color[d$Fis + d$MedRatio < 0.2] = duplicated
d$Color[d$Fis + d$MedRatio * 3 < 0.8] = duplicated
#d$Color[d$Fis + d$MedRatio * 8 < 2.3] = duplicated
# MedRatio is high/low while almost no heterozygotes = low confidence
d$Color[d$MedRatio < 0.30 & d$PropHet < 0.05] = lowconf
d$Color[d$MedRatio > 0.65 & d$PropHet < 0.05] = lowconf
## Very low Fis (relative to MedRatio) or nearly all-heterozygous = diverged
#d$Color[d$Fis < -0.8] = diverged
#d$Color[d$Fis + d$MedRatio * 2 < -0.00] = diverged
d$Color[d$Fis + d$MedRatio * 3 < 0.20] = diverged
d$Color[d$PropHet > 0.9] = diverged
#d$Color[d$Fis + d$MedRatio * 8 < 1.5] = diverged
# High Fis = low confidence
d$Color[d$Fis > 0.4] = lowconf
# Loci with high coverage
d$Color[d$MedCovHom > 60 | d$MedCovHet > 60] = highcov
# Too few samples with rare allele (rule disabled)
#d$Color[data$NumHet + data$NumRare < 5] = mas
# Extract bad loci infos
# NOTE(review): bad_snps / all_loci / bad_loci are computed but never
# used below -- dead code, apparently kept for reference.
bad_snps = d$Color != singleton
all_loci = unique(gsub("_.*", "", data$ID))
bad_loci = unique(gsub("_.*", "", data$ID[bad_snps]))
# Translate the color markers into Category labels so loci can be
# filtered by the next script in the pipeline
data$Category = "singleton"
data$Category[d$Color == duplicated] = "duplicated"
data$Category[d$Color == mas] = "mas"
data$Category[d$Color == diverged] = "diverged"
data$Category[d$Color == lowconf] = "lowconf"
data$Category[d$Color == highcov] = "highcov"
write.table(data[,c("Scaffold", "Position", "ID", "Category")],
output_file, sep="\t", quote=F, row.names=F)
# Report number of SNPs per category on stdout
report = table(data$Category)
cat("SNPs")
print(report)
# Diagnostic plots: pairwise panels, then PropHet vs MedRatio for all
# SNPs and for the remaining singletons only
png(paste0(input_file, "_1.png"), width=1200, height=950)
plot(d[,1:4], pch=16, cex=1, col=d$Color)
invisible(dev.off())
####################################################
png(paste0(input_file, "_2.png"), width=1200, height=950)
plot(d$PropHet, d$MedRatio, pch=19, cex=1.5, col=d$Color, xlim=c(0, 1), ylim=c(0, 0.8))
invisible(dev.off())
single = d[data$Category == "singleton", ]
png(paste0(input_file, "_3.png"), width=1200, height=950)
plot(single$PropHet,
single$MedRatio,
pch=19, cex=1.5, col=single$Color, xlim=c(0, 1), ylim=c(0, 0.8))
invisible(dev.off())
# Tests for deployApp()'s argument validation and its supporting helpers.
# They rely on rsconnect's internal test helpers (local_temp_app,
# local_temp_config, addTestServer, ...) and on snapshot files for the
# expected error/output text.
test_that("appDir must be an existing directory", {
# Both a non-string and a nonexistent path must fail with a clear error.
expect_snapshot(error = TRUE, {
deployApp(1)
deployApp("doesntexist")
})
})
test_that("single document appDir is deprecated", {
skip_on_cran()
# Passing a document (not a directory) as appDir must error.
expect_snapshot(error = TRUE, {
deployApp("foo.Rmd")
})
})
test_that("appPrimaryDoc must exist, if supplied", {
skip_on_cran()
dir <- local_temp_app()
# Both a vector-valued and a nonexistent appPrimaryDoc must error.
expect_snapshot(error = TRUE, {
deployApp(dir, appPrimaryDoc = c("foo.Rmd", "bar.Rmd"))
deployApp(dir, appPrimaryDoc = "foo.Rmd")
})
})
test_that("startup scripts are logged by default", {
dir <- local_temp_app()
withr::local_dir(dir)
# A .rsconnect_profile inside the app dir should be run and reported.
writeLines("1 + 1", file.path(dir, ".rsconnect_profile"))
expect_snapshot(runStartupScripts("."))
})
# record directory --------------------------------------------------------
test_that("findRecordPath() uses recordDir, then appPrimaryDoc, then appDir", {
expect_equal(findRecordPath("a"), "a")
expect_equal(findRecordPath("a", recordDir = "b"), "b")
expect_equal(findRecordPath("a", appPrimaryDoc = "c"), "a/c")
})
# app visibility ----------------------------------------------------------
test_that("needsVisibilityChange() returns FALSE when no change needed", {
# Minimal stand-in for an application record carrying only the
# visibility property that needsVisibilityChange() inspects.
dummyApp <- function(visibility) {
list(
deployment = list(
properties = list(
application.visibility = visibility
)
)
)
}
# No change needed: non-shinyapps server, or target visibility absent /
# equal to the currently deployed one.
expect_false(needsVisibilityChange("connect.com"))
expect_false(needsVisibilityChange("shinyapps.io", dummyApp("public"), NULL))
expect_false(needsVisibilityChange("shinyapps.io", dummyApp("public"), "public"))
# Change needed: requested visibility differs from the deployed one.
expect_true(needsVisibilityChange("shinyapps.io", dummyApp(NULL), "private"))
expect_true(needsVisibilityChange("shinyapps.io", dummyApp("public"), "private"))
})
test_that("needsVisibilityChange() errors for cloud", {
# Requesting a visibility on Posit Cloud must error (snapshot).
expect_snapshot(error = TRUE,
needsVisibilityChange("posit.cloud", appVisibility = "public")
)
})
test_that("deployHook executes function if set", {
# Hook unset: nothing runs and NULL is returned.
withr::local_options(rsconnect.pre.deploy = NULL)
expect_equal(
runDeploymentHook("PATH", "rsconnect.pre.deploy"),
NULL
)
# Hook set: it receives the app path and its value is returned.
withr::local_options(rsconnect.pre.deploy = function(path) path)
expect_equal(
runDeploymentHook("PATH", "rsconnect.pre.deploy"),
"PATH"
)
# verbose = TRUE should announce the hook invocation (snapshot).
expect_snapshot(
. <- runDeploymentHook("PATH", "rsconnect.pre.deploy", verbose = TRUE)
)
})
# deleted apps ------------------------------------------------------------
test_that("applicationDeleted() errors or prompts as needed", {
local_temp_config()
addTestServer("s")
addTestAccount("a", "s")
app <- local_temp_app()
addTestDeployment(app, appName = "name", account = "a", server = "s")
target <- createDeploymentTarget("name", "title", "id", NULL, "a", "a", "s", 1)
client <- list(createApplication = function(...) NULL)
# With no user input queued it must error and keep the stale
# deployment record on disk.
expect_snapshot(applicationDeleted(client, target, app), error = TRUE)
expect_length(dir(app, recursive = TRUE), 1)
# Choosing menu option 2 (presumably the "forget/redeploy" choice --
# confirm against the snapshot) should remove the stale record.
simulate_user_input(2)
expect_snapshot(. <- applicationDeleted(client, target, app))
expect_length(dir(app, recursive = TRUE), 0)
})
| /tests/testthat/test-deployApp.R | no_license | rstudio/rsconnect | R | false | false | 3,085 | r | test_that("appDir must be an existing directory", {
# (body of test_that("appDir must be an existing directory"); its
# opening line is fused into the preceding metadata row)
expect_snapshot(error = TRUE, {
deployApp(1)
deployApp("doesntexist")
})
})
# deployApp() argument validation and helper tests (continued).  Uses
# rsconnect's internal test helpers plus snapshots for expected text.
test_that("single document appDir is deprecated", {
skip_on_cran()
# A document path (not a directory) as appDir must error.
expect_snapshot(error = TRUE, {
deployApp("foo.Rmd")
})
})
test_that("appPrimaryDoc must exist, if supplied", {
skip_on_cran()
dir <- local_temp_app()
# Vector-valued and nonexistent appPrimaryDoc values must both error.
expect_snapshot(error = TRUE, {
deployApp(dir, appPrimaryDoc = c("foo.Rmd", "bar.Rmd"))
deployApp(dir, appPrimaryDoc = "foo.Rmd")
})
})
test_that("startup scripts are logged by default", {
dir <- local_temp_app()
withr::local_dir(dir)
# A .rsconnect_profile in the app dir should be run and reported.
writeLines("1 + 1", file.path(dir, ".rsconnect_profile"))
expect_snapshot(runStartupScripts("."))
})
# record directory --------------------------------------------------------
test_that("findRecordPath() uses recordDir, then appPrimaryDoc, then appDir", {
expect_equal(findRecordPath("a"), "a")
expect_equal(findRecordPath("a", recordDir = "b"), "b")
expect_equal(findRecordPath("a", appPrimaryDoc = "c"), "a/c")
})
# app visibility ----------------------------------------------------------
test_that("needsVisibilityChange() returns FALSE when no change needed", {
# Minimal application record exposing only the visibility property
# read by needsVisibilityChange().
dummyApp <- function(visibility) {
list(
deployment = list(
properties = list(
application.visibility = visibility
)
)
)
}
# FALSE: non-shinyapps server, or requested visibility absent/unchanged.
expect_false(needsVisibilityChange("connect.com"))
expect_false(needsVisibilityChange("shinyapps.io", dummyApp("public"), NULL))
expect_false(needsVisibilityChange("shinyapps.io", dummyApp("public"), "public"))
# TRUE: requested visibility differs from the deployed one.
expect_true(needsVisibilityChange("shinyapps.io", dummyApp(NULL), "private"))
expect_true(needsVisibilityChange("shinyapps.io", dummyApp("public"), "private"))
})
test_that("needsVisibilityChange() errors for cloud", {
# Requesting a visibility on Posit Cloud must error (snapshot).
expect_snapshot(error = TRUE,
needsVisibilityChange("posit.cloud", appVisibility = "public")
)
})
test_that("deployHook executes function if set", {
# Hook unset: nothing runs, NULL is returned.
withr::local_options(rsconnect.pre.deploy = NULL)
expect_equal(
runDeploymentHook("PATH", "rsconnect.pre.deploy"),
NULL
)
# Hook set: it is called with the app path and its value is returned.
withr::local_options(rsconnect.pre.deploy = function(path) path)
expect_equal(
runDeploymentHook("PATH", "rsconnect.pre.deploy"),
"PATH"
)
# verbose = TRUE announces the hook invocation (snapshot).
expect_snapshot(
. <- runDeploymentHook("PATH", "rsconnect.pre.deploy", verbose = TRUE)
)
})
# deleted apps ------------------------------------------------------------
test_that("applicationDeleted() errors or prompts as needed", {
local_temp_config()
addTestServer("s")
addTestAccount("a", "s")
app <- local_temp_app()
addTestDeployment(app, appName = "name", account = "a", server = "s")
target <- createDeploymentTarget("name", "title", "id", NULL, "a", "a", "s", 1)
client <- list(createApplication = function(...) NULL)
# Non-interactive (no queued input): errors and leaves the stale
# deployment record in place.
expect_snapshot(applicationDeleted(client, target, app), error = TRUE)
expect_length(dir(app, recursive = TRUE), 1)
# Queue menu choice 2 (presumably the "forget/redeploy" option --
# confirm against the snapshot); the stale record is then removed.
simulate_user_input(2)
expect_snapshot(. <- applicationDeleted(client, target, app))
expect_length(dir(app, recursive = TRUE), 0)
})
|
# Adapted Cochran-Mantel-Haenszel (CMH) test for allele frequency change in
# replicated (evolve-and-resequence style) populations. The test statistic's
# variance is adjusted for sampling noise from pooled sequencing (when
# 'poolSize' is given) and, when more than one generation is present, for
# random genetic drift parameterized by 'Ne'.
#
# Arguments:
#   freq      matrix (loci x populations) of allele frequencies; a vector is
#             promoted to a single-row matrix (one locus).
#   coverage  matrix of sequencing coverages, same dimensions as 'freq'.
#   Ne        effective population size(s), one per replicate; only used in
#             the drift branch (ignored, with a warning, when all columns
#             share one generation).
#   gen       generation labels for the population columns.
#   repl      replicate labels for the population columns (>= 2 replicates
#             required).
#   poolSize  pool sizes for pooled sequencing, or NULL for individual
#             sequencing.
#   mincov    minimum coverage; loci with any coverage < mincov get NA.
#   MeanStart if TRUE the expected frequency Ep2 averages both time points,
#             otherwise only the starting frequency is used.
#   IntGen    if TRUE, intermediate generations contribute per-interval
#             terms to the drift variance.
#   TA        if TRUE (requires IntGen=TRUE), use a Taylor approximation
#             t/(2*Ne) of the drift variance factor.
#   order     0 = columns ordered generation-within-replicate,
#             1 = replicate-within-generation.
#   correct   if TRUE, apply a continuity correction (no-drift branch only).
#   RetVal    0 = p-values, 1 = test statistics, 2 = both as a two-column
#             matrix ("test_statistic", "p.value").
#
# Returns a vector (or matrix for RetVal=2) with one entry per locus; NA
# wherever any population's coverage is below 'mincov'.
#
# NOTE(review): relies on data.table() (popInfo below) being attached by the
# package namespace; it is not loaded here.
adapted.cmh.test <- function(freq, coverage, Ne, gen, repl, poolSize=NULL, mincov=1, MeanStart=TRUE, IntGen=FALSE, TA=FALSE, order=0, correct = FALSE, RetVal=0){
# ---- argument validation ------------------------------------------------
# NOTE(review): the message below says "test statistic an p-value" — typo
# for "and"; left unchanged in this documentation-only pass.
if(!RetVal==0 && !RetVal==1 && !RetVal==2){
stop("(", RetVal, ") is not a valid choice for RetVal.
RetVal needs to be 0 (p-value) or 1 (test statistic) or 2 (test statistic an p-value).")
}
if (sum(is.na(freq))>0){
stop("Allele frequency matrix has missing values")
}
if (sum(is.na(coverage))>0){
stop("Coverage matrix has missing values")
}
if(sum(freq<0)>0) {
stop("Negative allele frequencies are not allowed")
}
if(sum(coverage<=0)>0) {
stop("Negative and 0 coverages are not allowed")
}
if(!is.null(poolSize) && sum(poolSize<=0)>0) {
stop("Negative and 0 sizes for pool size are not allowed")
}
# NOTE(review): warning text has typos ("Is it only possible" should read
# "It is only possible"; "generatons" -> "generations"); kept verbatim here.
if (TA==TRUE && IntGen==FALSE){
warning("Is it only possible to do Taylor approximation of the variance of the test
statistic if intermediate generatons are given. TA option will be ignored")
}
if (IntGen==FALSE && length(unique(gen))>2){
stop("IntGen is set to FALSE. Only two generations can be considered.")
}
if (IntGen==TRUE && length(unique(gen))==2){
warning("IntGen is set to TRUE, but only two time points are given. They will be used
as first and last time point and no intermediate generations will be considered.")
IntGen=FALSE
}
# 'mincov' must be a single value >= 1 (coerced to numeric in place).
if(length(mincov <- as.numeric(mincov)) != 1 ) {
stop("Length of 'mincov' (", length(mincov), ") has to be equal to '1'.")
}
if(is.na(mincov) | mincov < 1 ) {
stop("'mincov' (", mincov, ") has to be >= 1.")
}
if (sum(coverage<mincov)>0) {
warning("'NA' will be returned in the entries where 'coverage' is smaller than 'mincov' (", mincov, ") ")
}
# Promote single-locus vector input to 1-row matrices; basic shape checks.
if(is.vector(freq))
freq <- matrix(freq, nrow=1)
if(is.vector(coverage))
coverage <- matrix(coverage, nrow=1)
if (length(unique(repl))<2)
stop("Data for at least 2 replicates are needed")
if(!identical(dim(freq), dim(coverage)))
stop("The dimensions of 'freq' (", dim(freq), ") and 'coverage' (", dim(coverage), ") have to be identical.")
if(!missing(Ne)){
if (!is.integer(Ne)){
Ne <- as.integer(Ne)
warning("Ne value(s) which are not integer are converted to integer")
}
}
npop <- ncol(freq)
if(npop == 1)
stop("Allele frequencies of at least two populations need to be provided.")
if(npop %% length(gen) != 0 || npop %% length(repl) != 0)
stop("The number of populations (", npop, ") has to be a multiple of the length of 'gen'(", length(gen), ") and 'repl' (", length(repl), ").")
ng<-length(unique(gen))
nreps<-length(unique(repl))
# popInfo maps each column of 'freq'/'coverage' to its (generation,
# replicate) pair; 'order' selects which index varies fastest.
# NOTE(review): in the single-generation branch below the replicate labels
# are repeated with each=2 / times=2, i.e. exactly two columns per
# replicate are assumed — confirm this matches the intended layout.
if (length(unique(gen))>1){
if (order==0) {
popInfo <- data.table(pop=1:ncol(freq), gen=c(rep(unique(gen),nreps)), repl=rep(unique(repl),each=ng))
} else if (order==1) {
popInfo <- data.table(pop=1:ncol(freq), gen=c(rep(unique(gen),each=nreps)), repl=rep(unique(repl),ng))
} else {
stop("The order of the columns in the matrix of allele frequency and coverages can assume only values 0 and 1")
}
} else {
if (order==0) {
popInfo <- data.table(pop=1:ncol(freq), gen=c(rep(unique(gen),nreps)), repl=rep(unique(repl),each=2))
} else if (order==1) {
popInfo <- data.table(pop=1:ncol(freq), gen=c(rep(unique(gen),each=nreps)), repl=rep(unique(repl),2))
} else {
stop("The order of the columns in the matrix of allele frequency and coverages can assume only values 0 and 1")
}
}
# Per-locus count of populations below the coverage threshold; any hit
# forces NA in the final result (see end of function).
mask <- rowSums(coverage < mincov)
stat <- NA
# no drift
# All columns share one generation label: plain (pool-adjusted) CMH test.
if(length(unique(popInfo$gen)) == 1) {
if(!missing(Ne))
warning("Value of 'Ne' will be ignored because no random genetic drift is assumed.")
# individual sequencing
# Build the 2x2xK tables: x1/x2 = margin totals, x11/x21 = allele counts.
if(is.null(poolSize)) {
if (order==0) {
x1 <- coverage[,seq(1,2*nreps,by=2)]
x2 <- coverage[,seq(2,2*nreps,by=2)]
x11 <- freq[,seq(1,2*nreps,by=2)] * x1
x21 <- freq[,seq(2,2*nreps,by=2)] * x2
} else {
x1 <- coverage[,1:nreps]
x2 <- coverage[,(nreps+1):(2*nreps)]
x11 <- freq[,1:nreps] * x1
x21 <- freq[,(nreps+1):(2*nreps)] * x2
}
n <- x1 + x2
# Degenerate counts (all-zero or all-equal-to-coverage across replicates)
# would make the variance 0, so they are nudged by 1.
if (nrow(freq)==1){
if(all(x11==0)) {
x11[x11==0] <- rep(1,sum(x11==0))
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
if(all(x11==x1)){
x11[x11==x1] <- x1[x11==x1]-1
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
} else {
if (sum(rowSums(x11)==0)>0) {
x11[rowSums(x11)==0,1] <- rep(1,sum(rowSums(x11)==0))
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
# NOTE(review): the LHS below indexes the matrix with a row mask but no
# column index (compare the '==0' case above, which writes column 1
# only); the recycled logical therefore touches the masked rows in every
# column, filling them from column-1 coverages. Suspected missing ',1'
# — confirm against the intended adjustment before changing.
if(sum(rowSums(x11==x1)==nreps)>0){
x11[rowSums(x11==x1)==nreps] <- x1[rowSums(x11==x1)==nreps,1]-1
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
}
x12 <- x1 - x11
x22 <- x2 - x21
x_1 <- x11 + x21
x_2 <- x12 + x22
# Classic CMH statistic, optionally with a 0.5 continuity correction.
if (correct){
warning("A continuity correction is applied to the test statistic")
if (nrow(freq)==1) {stat<-(abs(sum(x11 - x1*x_1/n)) - 0.5)^2/sum(x1*x_1*x2*x_2/(n^2*(n-1))) }
else { stat<-(abs(rowSums(x11 - x1*x_1/n)) - 0.5)^2/rowSums(x1*x_1*x2*x_2/(n^2*(n-1))) }
} else {
if (nrow(freq)==1) {stat<-(sum(x11 - x1*x_1/n))^2/sum(x1*x_1*x2*x_2/(n^2*(n-1))) }
else { stat<-(rowSums(x11 - x1*x_1/n))^2/rowSums(x1*x_1*x2*x_2/(n^2*(n-1))) }
}
# pooled sequencing
# Coverage (R1/R2) and pool size (x1/x2) enter the variance separately.
} else if(npop %% length(poolSize) == 0) {
if (order==0) {
R1 <- coverage[,seq(1,2*nreps,by=2)]
R2 <- coverage[,seq(2,2*nreps,by=2)]
x11R <- freq[,seq(1,2*nreps,by=2)] * R1
x21R <- freq[,seq(2,2*nreps,by=2)] * R2
x1 <- poolSize[seq(1,2*nreps, by=2)]
x2 <- poolSize[seq(2,2*nreps, by=2)]
} else {
R1 <- coverage[,1:nreps]
R2 <- coverage[,(nreps+1):(2*nreps)]
x11R <- freq[,1:nreps] * R1
x21R <- freq[,(nreps+1):(2*nreps)] * R2
x1 <- poolSize[1:nreps]
x2 <- poolSize[(nreps+1):(2*nreps)]
}
n<- R1+R2
# Same degenerate-count adjustment as in the individual-sequencing case.
if (nrow(freq)==1){
if(all(x11R==0)) {
x11R[x11R==0] <- rep(1,sum(x11R==0))
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
if(all(x11R==R1)){
x11R[x11R==R1] <- R1[x11R==R1]-1
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
} else {
if (sum(rowSums(x11R)==0)>0) {
x11R[rowSums(x11R)==0,1] <- rep(1,sum(rowSums(x11R)==0))
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
# NOTE(review): same missing-',1' indexing concern as in the
# individual-sequencing branch above.
if(sum(rowSums(x11R==R1)==nreps)>0){
x11R[rowSums(x11R==R1)==nreps] <- R1[rowSums(x11R==R1)==nreps,1]-1
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
}
x12R <- R1 - x11R
x22R <- R2 - x21R
x_1<-x11R+x21R
x_2<-x12R+x22R
# Pool-seq variance includes the (1+(R-1)/poolsize) inflation factor.
if (nrow(freq)==1) {stat <- sum(x11R - R1*x_1/n)^2/sum((R2/n)^2*(x11R*x12R/R1*(1+(R1-1)/x1)) + (R1/n)^2*(x21R*x22R/R2*(1+(R2-1)/x2))) }
else { stat <- rowSums(x11R - R1*x_1/n)^2/rowSums((R2/n)^2*(x11R*x12R/R1*(1+(R1-1)/x1)) + (R1/n)^2*(x21R*x22R/R2*(1+(R2-1)/x2))) }
} else {
stop("The number of populations (", npop, ") has to be a multiple of the length of 'poolSize' (", length(poolSize), ").")
}
# with drift #####
# More than one generation: a drift-variance term 'sigd' is added, built
# either from the first/last time points or (IntGen) per interval.
} else {
if (length(Ne)!=length(unique(repl))){
stop("For each replicate a corresponding value of the effective population size 'Ne' has to be given")
}
# individual sequencing
if(is.null(poolSize)) {
ming <- min(gen)
maxg <- max(gen)
x1 <- coverage[,popInfo$gen == ming]
x2 <- coverage[,popInfo$gen == maxg]
n<-x1+x2
x11 <- matrix(freq[,popInfo$gen == ming], nrow = nrow(freq)) * x1
if (nrow(freq)==1){
if(all(x11==0)) {
x11[x11==0] <- rep(1,sum(x11==0))
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
if(all(x11==x1)){
x11[x11==x1] <- x1[x11==x1]-1
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
} else {
if (sum(rowSums(x11)==0)>0) {
x11[rowSums(x11)==0,1] <- rep(1,sum(rowSums(x11)==0))
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
# NOTE(review): same missing-',1' indexing concern as in the no-drift
# branch above.
if(sum(rowSums(x11==x1)==nreps)>0){
x11[rowSums(x11==x1)==nreps] <- x1[rowSums(x11==x1)==nreps,1]-1
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
}
x12 <- x1 - x11
x21 <- matrix(freq[,popInfo$gen == maxg], nrow = nrow(freq)) * x2
x22 <- x2 - x21
x_1<-x11+x21
x_2<-x12+x22
# Default drift variance factor: p(1-p) * (1 - (1 - 1/(2Ne))^t) over the
# full interval t = maxg - ming.
if (nrow(freq)==1) {sigd <- x11/x1*(1-x11/x1)*(1-(1-1/(2*Ne))^(maxg-ming))}
else { sigd <- x11/x1*(1-x11/x1)*matrix(rep(1-(1-1/(2*Ne))^(maxg-ming),nrow(x1)), nrow = nrow(x1), byrow = TRUE) }
# With intermediate generations, rebuild sigd from per-interval terms.
if (IntGen == TRUE){
sigd <- c()
freq_int <- matrix(freq[,!(popInfo$gen %in% c(ming, maxg))], nrow = nrow(freq))
coverage_int <- matrix(coverage[,!(popInfo$gen %in% c(ming, maxg))], nrow = nrow(freq))
x_int1 <- freq_int * coverage_int
#if(sum(x_int1==0)>0) {
# x_int1[x_int1==0] <- rep(1,sum(x_int1==0))
# warning('The counts of the intermediate generations assuming values 0 or equal the coverage of the considered
# locus are changed to 1 and to coverage-1 respectively.')
#}
#if(sum(x_int1==coverage_int)>0){
# x_int1[x_int1==coverage_int] <- coverage_int[x_int1==coverage_int]-1
#warning('The counts of the intermediate generations assuming values 0 or equal the coverage of the considered
# locus are changed to 1 and to coverage-1 respectively.')
#}
x_int2 <- coverage_int - x_int1
# Per-interval drift terms. The rep()/sort() bookkeeping lays out one
# (1 - (1 - 1/(2Ne))^dt) factor per (replicate, interval) pair; the
# layout depends on 'order'. TA replaces that factor with its Taylor
# linearization dt/(2Ne).
if (order==0){
if (TA==FALSE){
if (nrow(freq)==1) { sigd_mat <- (c(x11,x_int1)*(c(x1,coverage_int)-c(x11,x_int1))/c(x1,coverage_int)^2)*((1-(1-1/(2*rep(Ne,each=(ng-1))))^(c(rep(sort(gen)[2]-ming,each=nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],nreps)))))}
else { sigd_mat <- (cbind(x11,x_int1)*(cbind(x1,coverage_int)-cbind(x11,x_int1))/cbind(x1,coverage_int)^2)*(matrix(rep(1-(1-1/(2*rep(Ne,each=(ng-1))))^(c(rep(sort(gen)[2]-ming,each=nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],nreps))),nrow(x1)),nrow = nrow(x1), byrow = TRUE)) }
}
if (TA==TRUE){
if (nrow(freq)==1) { sigd_mat <- (c(x11,x_int1)*(c(x1,coverage_int)-c(x11,x_int1))/c(x1,coverage_int)^2)*(c(rep(sort(gen)[2]-ming,each=nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],nreps)))/((2*rep(Ne,each=(ng-1)))) }
else { sigd_mat <- (cbind(x11,x_int1)*(cbind(x1,coverage_int)-cbind(x11,x_int1))/cbind(x1,coverage_int)^2)*(matrix(rep(c(rep(sort(gen)[2]-ming,each=nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],nreps))/((2*rep(Ne,each=(ng-1)))),nrow(x1)),nrow = nrow(x1), byrow = TRUE)) }
}
} else {
if (TA==FALSE){
if (nrow(freq)==1) { sigd_mat <- (c(x11,x_int1)*(c(x1,coverage_int)-c(x11,x_int1))/c(x1,coverage_int)^2)*((1-(1-1/(2*rep(Ne,ng-1)))^(c(rep(sort(gen)[2]-ming,nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],each=nreps)))))}
else { sigd_mat <- (cbind(x11,x_int1)*(cbind(x1,coverage_int)-cbind(x11,x_int1))/cbind(x1,coverage_int)^2)*(matrix(rep(1-(1-1/(2*rep(Ne,ng-1)))^(c(rep(sort(unique(gen))[2]-ming,nreps),rep(sort(unique(gen))[-c(1,2)]-sort(unique(gen))[-c(1,ng)],each=nreps))),nrow(x1)),nrow = nrow(x1), byrow = TRUE)) }
}
if (TA==TRUE){
if (nrow(freq)==1) { sigd_mat <- (c(x11,x_int1)*(c(x1,coverage_int)-c(x11,x_int1))/c(x1,coverage_int)^2)*(c(rep(sort(gen)[2]-ming,nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],each=nreps)))/((2*rep(Ne,ng-1))) }
else { sigd_mat <- (cbind(x11,x_int1)*(cbind(x1,coverage_int)-cbind(x11,x_int1))/cbind(x1,coverage_int)^2)*(matrix(rep(c(rep(sort(unique(gen))[2]-ming,nreps),rep(sort(unique(gen))[-c(1,2)]-sort(unique(gen))[-c(1,ng)],each=nreps)),nrow(x1))/((2*rep(Ne,ng-1))),nrow = nrow(x1), byrow = TRUE)) }
}
}
# Sum the per-interval terms within each replicate.
for (r in unique(repl)){
if (is.vector(sigd_mat)==TRUE) {sigd <- c(sigd, sum(sigd_mat[c(popInfo$repl[popInfo$gen==ming]==r,popInfo$repl[!(popInfo$gen %in% c(ming, maxg))]==r)]))}
else {sigd <- cbind(sigd,rowSums(sigd_mat[,c(popInfo$repl[popInfo$gen==ming]==r,popInfo$repl[!(popInfo$gen %in% c(ming, maxg))]==r)]))}
}
}
# Expected frequency used in the variance: mean of both time points, or
# just the starting frequency when MeanStart = FALSE.
Ep2 <- (x11/x1 + x21/x2)/2
if(MeanStart==FALSE)
Ep2 <- x11/x1
if (nrow(freq)==1) {stat <- sum(x11 - x1*x_1/n)^2/sum((x2/n)^2*(x11*x12/x1) + (x1/n)^2*(x2*Ep2*(1-Ep2)+x2*(x2-1)*(sigd)))}
else { stat <- rowSums(x11 - x1*x_1/n)^2/rowSums((x2/n)^2*(x11*x12/x1) + (x1/n)^2*(x2*Ep2*(1-Ep2)+x2*(x2-1)*(sigd))) }
# Pooled sequencing with drift: same structure, with coverages R1/R2 and
# pool sizes x1/x2 kept separate.
} else if(npop %% length(poolSize) == 0) {
ming <- min(gen)
maxg <- max(gen)
R1 <- matrix(coverage[,popInfo$gen == ming], nrow = nrow(coverage))
R2 <- matrix(coverage[,popInfo$gen == maxg], nrow = nrow(coverage))
n <- R1 + R2
x11R <- matrix(freq[,popInfo$gen == ming], nrow = nrow(freq)) * R1
if (nrow(freq)==1){
if(all(x11R==0)) {
x11R[x11R==0] <- rep(1,sum(x11R==0))
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
if(all(x11R==R1)){
x11R[x11R==R1] <- R1[x11R==R1]-1
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
} else {
if (sum(rowSums(x11R)==0)>0) {
x11R[rowSums(x11R)==0,1] <- rep(1,sum(rowSums(x11R)==0))
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
# NOTE(review): same missing-',1' indexing concern as in the branches
# above.
if(sum(rowSums(x11R==R1)==nreps)>0){
x11R[rowSums(x11R==R1)==nreps] <- R1[rowSums(x11R==R1)==nreps,1]-1
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
}
x12R <- R1 - x11R
x21R <- matrix(freq[,popInfo$gen == maxg], nrow = nrow(freq)) * R2
x22R <- R2 - x21R
x1 <- poolSize[popInfo$gen == ming]
x2 <- poolSize[popInfo$gen == maxg]
x_1 <- x11R+x21R
x_2 <- x12R+x22R
sigd <- x11R/R1*(1-x11R/R1)*(1-(1-1/(2*Ne))^(maxg-ming))
if (IntGen == TRUE){
sigd <- c()
freq_int <- matrix(freq[,!(popInfo$gen %in% c(ming, maxg))], nrow = nrow(freq))
coverage_int <- matrix(coverage[,!(popInfo$gen %in% c(ming, maxg))], nrow = nrow(freq))
x_int1 <- freq_int * coverage_int
# if(sum(x_int1==0)>0) {
# x_int1[x_int1==0] <- rep(1,sum(x_int1==0))
# warning('The counts of the intermediate generations assuming values 0 or equal the coverage of the considered
# locus are changed to 1 and to coverage-1 respectively.')
# }
# if(sum(x_int1==coverage_int)>0){
# x_int1[x_int1==coverage_int] <- coverage_int[x_int1==coverage_int]-1
# warning('The counts of the intermediate generations assuming values 0 or equal the coverage of the considered
# locus are changed to 1 and to coverage-1 respectively.')
# }
x_int2 <- coverage_int - x_int1
# NOTE(review): the intermediate-generation pool size is taken to be the
# coverage itself here — confirm this approximation is intentional.
pool_int <- coverage_int
if (order==0){
if (TA==FALSE){
if (nrow(freq)==1) {sigd_mat <- (c(x11R,x_int1)*(c(R1,pool_int)-c(x11R,x_int1))/c(R1,pool_int)^2)*((1-(1-1/(2*rep(Ne,each=(ng-1))))^(c(rep(sort(gen)[2]-ming,each=nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],nreps)))))}
else { sigd_mat <- (cbind(x11R,x_int1)*(cbind(R1,pool_int)-cbind(x11R,x_int1))/cbind(R1,pool_int)^2)*(matrix(rep(1-(1-1/(2*rep(Ne,each=(ng-1))))^(c(rep(sort(gen)[2]-ming,each=nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],nreps))),nrow(R1)),nrow = nrow(R1), byrow = TRUE)) }
}
if (TA==TRUE) {
if (nrow(freq)==1) { sigd_mat <- (c(x11R,x_int1)*(c(R1,pool_int)-c(x11R,x_int1))/c(R1,pool_int)^2)*(c(rep(sort(gen)[2]-ming,each=nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],nreps)))/((2*rep(Ne,each=(ng-1))))}
else { sigd_mat <- (cbind(x11R,x_int1)*(cbind(R1,pool_int)-cbind(x11R,x_int1))/cbind(R1,pool_int)^2)*(matrix(rep(c(rep(sort(gen)[2]-ming,each=nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],nreps)),nrow(R1))/((2*rep(Ne,each=(ng-1)))),nrow = nrow(R1), byrow = TRUE)) }
}
} else {
if (TA==FALSE){
if (nrow(freq)==1) {sigd_mat <- (c(x11R,x_int1)*(c(R1,pool_int)-c(x11R,x_int1))/c(R1,pool_int)^2)*((1-(1-1/(2*rep(Ne,ng-1)))^(c(rep(sort(gen)[2]-ming,nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],each=nreps)))))}
else { sigd_mat <- (cbind(x11R,x_int1)*(cbind(R1,pool_int)-cbind(x11R,x_int1))/cbind(R1,pool_int)^2)*(matrix(rep(1-(1-1/(2*rep(Ne,ng-1)))^(c(rep(sort(gen)[2]-ming,nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],each=nreps))),nrow(R1)),nrow = nrow(R1), byrow = TRUE)) }
}
if (TA==TRUE) {
if (nrow(freq)==1) { sigd_mat <- (c(x11R,x_int1)*(c(R1,pool_int)-c(x11R,x_int1))/c(R1,pool_int)^2)*(c(rep(sort(gen)[2]-ming,nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],each=nreps)))/((2*rep(Ne,ng-1)))}
else { sigd_mat <- (cbind(x11R,x_int1)*(cbind(R1,pool_int)-cbind(x11R,x_int1))/cbind(R1,pool_int)^2)*(matrix(rep(c(rep(sort(gen)[2]-ming,nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],each=nreps)),nrow(R1))/((2*rep(Ne,ng-1))),nrow = nrow(R1), byrow = TRUE)) }
}
}
# Sum the per-interval terms within each replicate.
for (r in unique(repl)){
if (is.vector(sigd_mat)==TRUE) {sigd <- c(sigd, sum(sigd_mat[c(popInfo$repl[popInfo$gen==ming]==r,popInfo$repl[!(popInfo$gen %in% c(ming, maxg))]==r)]))}
else {sigd <- cbind(sigd,rowSums(sigd_mat[,c(popInfo$repl[popInfo$gen==ming]==r,popInfo$repl[!(popInfo$gen %in% c(ming, maxg))]==r)]))}
}
}
Ep2 <- (x11R/R1 + x21R/R2)/2
if(MeanStart==FALSE){Ep2 <- x11R/R1}
if (nrow(freq)==1) {stat <- sum(x11R - R1*x_1/n)^2/sum((R2/n)^2*(x11R*x12R/R1*(1+(R1-1)/x1)) + (R1/n)^2*(R2*Ep2-R2*Ep2^2+(R2*(R2-1)/x2)*(Ep2*(1-Ep2)+(x2-1)*(sigd)))) }
else { stat <- rowSums(x11R - R1*x_1/n)^2/rowSums((R2/n)^2*(x11R*x12R/R1*(1+(R1-1)/x1)) + (R1/n)^2*(R2*Ep2-R2*Ep2^2+(R2*(R2-1)/matrix(rep(x2,nrow(R2)),nrow=nrow(R2)))*(Ep2*(1-Ep2)+(matrix(rep(x2-1,nrow(Ep2)),nrow = nrow(Ep2)))*(sigd)))) }
} else {
stop("The number of populations (", npop, ") has to be a multiple of the length of 'poolSize' (", length(poolSize), ").")
}
}
# convert summary statistic to p-value and return it
# Loci failing the mincov filter are masked out; the statistic is chi-squared
# with 1 degree of freedom under the null.
stat[mask>0] <- NA
res <- pchisq(stat, df=1, lower.tail=FALSE)
if(!RetVal==0){
if(RetVal==1){
res <- stat
}else if(RetVal==2){
res <- cbind(stat,pchisq(stat, df=1, lower.tail=FALSE))
colnames(res) <- c("test_statistic", "p.value")
}
}
return(res)
}
| /R/adapted.cmh.test.R | no_license | MartaPelizzola/ACER | R | false | false | 21,396 | r | adapted.cmh.test <- function(freq, coverage, Ne, gen, repl, poolSize=NULL, mincov=1, MeanStart=TRUE, IntGen=FALSE, TA=FALSE, order=0, correct = FALSE, RetVal=0){
if(!RetVal==0 && !RetVal==1 && !RetVal==2){
stop("(", RetVal, ") is not a valid choice for RetVal.
RetVal needs to be 0 (p-value) or 1 (test statistic) or 2 (test statistic an p-value).")
}
if (sum(is.na(freq))>0){
stop("Allele frequency matrix has missing values")
}
if (sum(is.na(coverage))>0){
stop("Coverage matrix has missing values")
}
if(sum(freq<0)>0) {
stop("Negative allele frequencies are not allowed")
}
if(sum(coverage<=0)>0) {
stop("Negative and 0 coverages are not allowed")
}
if(!is.null(poolSize) && sum(poolSize<=0)>0) {
stop("Negative and 0 sizes for pool size are not allowed")
}
if (TA==TRUE && IntGen==FALSE){
warning("Is it only possible to do Taylor approximation of the variance of the test
statistic if intermediate generatons are given. TA option will be ignored")
}
if (IntGen==FALSE && length(unique(gen))>2){
stop("IntGen is set to FALSE. Only two generations can be considered.")
}
if (IntGen==TRUE && length(unique(gen))==2){
warning("IntGen is set to TRUE, but only two time points are given. They will be used
as first and last time point and no intermediate generations will be considered.")
IntGen=FALSE
}
if(length(mincov <- as.numeric(mincov)) != 1 ) {
stop("Length of 'mincov' (", length(mincov), ") has to be equal to '1'.")
}
if(is.na(mincov) | mincov < 1 ) {
stop("'mincov' (", mincov, ") has to be >= 1.")
}
if (sum(coverage<mincov)>0) {
warning("'NA' will be returned in the entries where 'coverage' is smaller than 'mincov' (", mincov, ") ")
}
if(is.vector(freq))
freq <- matrix(freq, nrow=1)
if(is.vector(coverage))
coverage <- matrix(coverage, nrow=1)
if (length(unique(repl))<2)
stop("Data for at least 2 replicates are needed")
if(!identical(dim(freq), dim(coverage)))
stop("The dimensions of 'freq' (", dim(freq), ") and 'coverage' (", dim(coverage), ") have to be identical.")
if(!missing(Ne)){
if (!is.integer(Ne)){
Ne <- as.integer(Ne)
warning("Ne value(s) which are not integer are converted to integer")
}
}
npop <- ncol(freq)
if(npop == 1)
stop("Allele frequencies of at least two populations need to be provided.")
if(npop %% length(gen) != 0 || npop %% length(repl) != 0)
stop("The number of populations (", npop, ") has to be a multiple of the length of 'gen'(", length(gen), ") and 'repl' (", length(repl), ").")
ng<-length(unique(gen))
nreps<-length(unique(repl))
if (length(unique(gen))>1){
if (order==0) {
popInfo <- data.table(pop=1:ncol(freq), gen=c(rep(unique(gen),nreps)), repl=rep(unique(repl),each=ng))
} else if (order==1) {
popInfo <- data.table(pop=1:ncol(freq), gen=c(rep(unique(gen),each=nreps)), repl=rep(unique(repl),ng))
} else {
stop("The order of the columns in the matrix of allele frequency and coverages can assume only values 0 and 1")
}
} else {
if (order==0) {
popInfo <- data.table(pop=1:ncol(freq), gen=c(rep(unique(gen),nreps)), repl=rep(unique(repl),each=2))
} else if (order==1) {
popInfo <- data.table(pop=1:ncol(freq), gen=c(rep(unique(gen),each=nreps)), repl=rep(unique(repl),2))
} else {
stop("The order of the columns in the matrix of allele frequency and coverages can assume only values 0 and 1")
}
}
mask <- rowSums(coverage < mincov)
stat <- NA
# no drift
if(length(unique(popInfo$gen)) == 1) {
if(!missing(Ne))
warning("Value of 'Ne' will be ignored because no random genetic drift is assumed.")
# individual sequencing
if(is.null(poolSize)) {
if (order==0) {
x1 <- coverage[,seq(1,2*nreps,by=2)]
x2 <- coverage[,seq(2,2*nreps,by=2)]
x11 <- freq[,seq(1,2*nreps,by=2)] * x1
x21 <- freq[,seq(2,2*nreps,by=2)] * x2
} else {
x1 <- coverage[,1:nreps]
x2 <- coverage[,(nreps+1):(2*nreps)]
x11 <- freq[,1:nreps] * x1
x21 <- freq[,(nreps+1):(2*nreps)] * x2
}
n <- x1 + x2
if (nrow(freq)==1){
if(all(x11==0)) {
x11[x11==0] <- rep(1,sum(x11==0))
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
if(all(x11==x1)){
x11[x11==x1] <- x1[x11==x1]-1
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
} else {
if (sum(rowSums(x11)==0)>0) {
x11[rowSums(x11)==0,1] <- rep(1,sum(rowSums(x11)==0))
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
if(sum(rowSums(x11==x1)==nreps)>0){
x11[rowSums(x11==x1)==nreps] <- x1[rowSums(x11==x1)==nreps,1]-1
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
}
x12 <- x1 - x11
x22 <- x2 - x21
x_1 <- x11 + x21
x_2 <- x12 + x22
if (correct){
warning("A continuity correction is applied to the test statistic")
if (nrow(freq)==1) {stat<-(abs(sum(x11 - x1*x_1/n)) - 0.5)^2/sum(x1*x_1*x2*x_2/(n^2*(n-1))) }
else { stat<-(abs(rowSums(x11 - x1*x_1/n)) - 0.5)^2/rowSums(x1*x_1*x2*x_2/(n^2*(n-1))) }
} else {
if (nrow(freq)==1) {stat<-(sum(x11 - x1*x_1/n))^2/sum(x1*x_1*x2*x_2/(n^2*(n-1))) }
else { stat<-(rowSums(x11 - x1*x_1/n))^2/rowSums(x1*x_1*x2*x_2/(n^2*(n-1))) }
}
# pooled sequencing
} else if(npop %% length(poolSize) == 0) {
if (order==0) {
R1 <- coverage[,seq(1,2*nreps,by=2)]
R2 <- coverage[,seq(2,2*nreps,by=2)]
x11R <- freq[,seq(1,2*nreps,by=2)] * R1
x21R <- freq[,seq(2,2*nreps,by=2)] * R2
x1 <- poolSize[seq(1,2*nreps, by=2)]
x2 <- poolSize[seq(2,2*nreps, by=2)]
} else {
R1 <- coverage[,1:nreps]
R2 <- coverage[,(nreps+1):(2*nreps)]
x11R <- freq[,1:nreps] * R1
x21R <- freq[,(nreps+1):(2*nreps)] * R2
x1 <- poolSize[1:nreps]
x2 <- poolSize[(nreps+1):(2*nreps)]
}
n<- R1+R2
if (nrow(freq)==1){
if(all(x11R==0)) {
x11R[x11R==0] <- rep(1,sum(x11R==0))
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
if(all(x11R==R1)){
x11R[x11R==R1] <- R1[x11R==R1]-1
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
} else {
if (sum(rowSums(x11R)==0)>0) {
x11R[rowSums(x11R)==0,1] <- rep(1,sum(rowSums(x11R)==0))
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
if(sum(rowSums(x11R==R1)==nreps)>0){
x11R[rowSums(x11R==R1)==nreps] <- R1[rowSums(x11R==R1)==nreps,1]-1
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
}
x12R <- R1 - x11R
x22R <- R2 - x21R
x_1<-x11R+x21R
x_2<-x12R+x22R
if (nrow(freq)==1) {stat <- sum(x11R - R1*x_1/n)^2/sum((R2/n)^2*(x11R*x12R/R1*(1+(R1-1)/x1)) + (R1/n)^2*(x21R*x22R/R2*(1+(R2-1)/x2))) }
else { stat <- rowSums(x11R - R1*x_1/n)^2/rowSums((R2/n)^2*(x11R*x12R/R1*(1+(R1-1)/x1)) + (R1/n)^2*(x21R*x22R/R2*(1+(R2-1)/x2))) }
} else {
stop("The number of populations (", npop, ") has to be a multiple of the length of 'poolSize' (", length(poolSize), ").")
}
# with drift #####
} else {
if (length(Ne)!=length(unique(repl))){
stop("For each replicate a corresponding value of the effective population size 'Ne' has to be given")
}
# individual sequencing
if(is.null(poolSize)) {
ming <- min(gen)
maxg <- max(gen)
x1 <- coverage[,popInfo$gen == ming]
x2 <- coverage[,popInfo$gen == maxg]
n<-x1+x2
x11 <- matrix(freq[,popInfo$gen == ming], nrow = nrow(freq)) * x1
if (nrow(freq)==1){
if(all(x11==0)) {
x11[x11==0] <- rep(1,sum(x11==0))
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
if(all(x11==x1)){
x11[x11==x1] <- x1[x11==x1]-1
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
} else {
if (sum(rowSums(x11)==0)>0) {
x11[rowSums(x11)==0,1] <- rep(1,sum(rowSums(x11)==0))
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
if(sum(rowSums(x11==x1)==nreps)>0){
x11[rowSums(x11==x1)==nreps] <- x1[rowSums(x11==x1)==nreps,1]-1
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
}
x12 <- x1 - x11
x21 <- matrix(freq[,popInfo$gen == maxg], nrow = nrow(freq)) * x2
x22 <- x2 - x21
x_1<-x11+x21
x_2<-x12+x22
if (nrow(freq)==1) {sigd <- x11/x1*(1-x11/x1)*(1-(1-1/(2*Ne))^(maxg-ming))}
else { sigd <- x11/x1*(1-x11/x1)*matrix(rep(1-(1-1/(2*Ne))^(maxg-ming),nrow(x1)), nrow = nrow(x1), byrow = TRUE) }
if (IntGen == TRUE){
sigd <- c()
freq_int <- matrix(freq[,!(popInfo$gen %in% c(ming, maxg))], nrow = nrow(freq))
coverage_int <- matrix(coverage[,!(popInfo$gen %in% c(ming, maxg))], nrow = nrow(freq))
x_int1 <- freq_int * coverage_int
#if(sum(x_int1==0)>0) {
# x_int1[x_int1==0] <- rep(1,sum(x_int1==0))
# warning('The counts of the intermediate generations assuming values 0 or equal the coverage of the considered
# locus are changed to 1 and to coverage-1 respectively.')
#}
#if(sum(x_int1==coverage_int)>0){
# x_int1[x_int1==coverage_int] <- coverage_int[x_int1==coverage_int]-1
#warning('The counts of the intermediate generations assuming values 0 or equal the coverage of the considered
# locus are changed to 1 and to coverage-1 respectively.')
#}
x_int2 <- coverage_int - x_int1
if (order==0){
if (TA==FALSE){
if (nrow(freq)==1) { sigd_mat <- (c(x11,x_int1)*(c(x1,coverage_int)-c(x11,x_int1))/c(x1,coverage_int)^2)*((1-(1-1/(2*rep(Ne,each=(ng-1))))^(c(rep(sort(gen)[2]-ming,each=nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],nreps)))))}
else { sigd_mat <- (cbind(x11,x_int1)*(cbind(x1,coverage_int)-cbind(x11,x_int1))/cbind(x1,coverage_int)^2)*(matrix(rep(1-(1-1/(2*rep(Ne,each=(ng-1))))^(c(rep(sort(gen)[2]-ming,each=nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],nreps))),nrow(x1)),nrow = nrow(x1), byrow = TRUE)) }
}
if (TA==TRUE){
if (nrow(freq)==1) { sigd_mat <- (c(x11,x_int1)*(c(x1,coverage_int)-c(x11,x_int1))/c(x1,coverage_int)^2)*(c(rep(sort(gen)[2]-ming,each=nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],nreps)))/((2*rep(Ne,each=(ng-1)))) }
else { sigd_mat <- (cbind(x11,x_int1)*(cbind(x1,coverage_int)-cbind(x11,x_int1))/cbind(x1,coverage_int)^2)*(matrix(rep(c(rep(sort(gen)[2]-ming,each=nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],nreps))/((2*rep(Ne,each=(ng-1)))),nrow(x1)),nrow = nrow(x1), byrow = TRUE)) }
}
} else {
if (TA==FALSE){
if (nrow(freq)==1) { sigd_mat <- (c(x11,x_int1)*(c(x1,coverage_int)-c(x11,x_int1))/c(x1,coverage_int)^2)*((1-(1-1/(2*rep(Ne,ng-1)))^(c(rep(sort(gen)[2]-ming,nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],each=nreps)))))}
else { sigd_mat <- (cbind(x11,x_int1)*(cbind(x1,coverage_int)-cbind(x11,x_int1))/cbind(x1,coverage_int)^2)*(matrix(rep(1-(1-1/(2*rep(Ne,ng-1)))^(c(rep(sort(unique(gen))[2]-ming,nreps),rep(sort(unique(gen))[-c(1,2)]-sort(unique(gen))[-c(1,ng)],each=nreps))),nrow(x1)),nrow = nrow(x1), byrow = TRUE)) }
}
if (TA==TRUE){
if (nrow(freq)==1) { sigd_mat <- (c(x11,x_int1)*(c(x1,coverage_int)-c(x11,x_int1))/c(x1,coverage_int)^2)*(c(rep(sort(gen)[2]-ming,nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],each=nreps)))/((2*rep(Ne,ng-1))) }
else { sigd_mat <- (cbind(x11,x_int1)*(cbind(x1,coverage_int)-cbind(x11,x_int1))/cbind(x1,coverage_int)^2)*(matrix(rep(c(rep(sort(unique(gen))[2]-ming,nreps),rep(sort(unique(gen))[-c(1,2)]-sort(unique(gen))[-c(1,ng)],each=nreps)),nrow(x1))/((2*rep(Ne,ng-1))),nrow = nrow(x1), byrow = TRUE)) }
}
}
for (r in unique(repl)){
if (is.vector(sigd_mat)==TRUE) {sigd <- c(sigd, sum(sigd_mat[c(popInfo$repl[popInfo$gen==ming]==r,popInfo$repl[!(popInfo$gen %in% c(ming, maxg))]==r)]))}
else {sigd <- cbind(sigd,rowSums(sigd_mat[,c(popInfo$repl[popInfo$gen==ming]==r,popInfo$repl[!(popInfo$gen %in% c(ming, maxg))]==r)]))}
}
}
Ep2 <- (x11/x1 + x21/x2)/2
if(MeanStart==FALSE)
Ep2 <- x11/x1
if (nrow(freq)==1) {stat <- sum(x11 - x1*x_1/n)^2/sum((x2/n)^2*(x11*x12/x1) + (x1/n)^2*(x2*Ep2*(1-Ep2)+x2*(x2-1)*(sigd)))}
else { stat <- rowSums(x11 - x1*x_1/n)^2/rowSums((x2/n)^2*(x11*x12/x1) + (x1/n)^2*(x2*Ep2*(1-Ep2)+x2*(x2-1)*(sigd))) }
} else if(npop %% length(poolSize) == 0) {
ming <- min(gen)
maxg <- max(gen)
R1 <- matrix(coverage[,popInfo$gen == ming], nrow = nrow(coverage))
R2 <- matrix(coverage[,popInfo$gen == maxg], nrow = nrow(coverage))
n <- R1 + R2
x11R <- matrix(freq[,popInfo$gen == ming], nrow = nrow(freq)) * R1
if (nrow(freq)==1){
if(all(x11R==0)) {
x11R[x11R==0] <- rep(1,sum(x11R==0))
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
if(all(x11R==R1)){
x11R[x11R==R1] <- R1[x11R==R1]-1
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
} else {
if (sum(rowSums(x11R)==0)>0) {
x11R[rowSums(x11R)==0,1] <- rep(1,sum(rowSums(x11R)==0))
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
if(sum(rowSums(x11R==R1)==nreps)>0){
x11R[rowSums(x11R==R1)==nreps] <- R1[rowSums(x11R==R1)==nreps,1]-1
warning('The counts that equal 0 or equal the coverage in all replicates are changed to 1 or to coverage-1 respectively.')
}
}
x12R <- R1 - x11R
x21R <- matrix(freq[,popInfo$gen == maxg], nrow = nrow(freq)) * R2
x22R <- R2 - x21R
x1 <- poolSize[popInfo$gen == ming]
x2 <- poolSize[popInfo$gen == maxg]
x_1 <- x11R+x21R
x_2 <- x12R+x22R
sigd <- x11R/R1*(1-x11R/R1)*(1-(1-1/(2*Ne))^(maxg-ming))
if (IntGen == TRUE){
sigd <- c()
freq_int <- matrix(freq[,!(popInfo$gen %in% c(ming, maxg))], nrow = nrow(freq))
coverage_int <- matrix(coverage[,!(popInfo$gen %in% c(ming, maxg))], nrow = nrow(freq))
x_int1 <- freq_int * coverage_int
# if(sum(x_int1==0)>0) {
# x_int1[x_int1==0] <- rep(1,sum(x_int1==0))
# warning('The counts of the intermediate generations assuming values 0 or equal the coverage of the considered
# locus are changed to 1 and to coverage-1 respectively.')
# }
# if(sum(x_int1==coverage_int)>0){
# x_int1[x_int1==coverage_int] <- coverage_int[x_int1==coverage_int]-1
# warning('The counts of the intermediate generations assuming values 0 or equal the coverage of the considered
# locus are changed to 1 and to coverage-1 respectively.')
# }
x_int2 <- coverage_int - x_int1
pool_int <- coverage_int
if (order==0){
if (TA==FALSE){
if (nrow(freq)==1) {sigd_mat <- (c(x11R,x_int1)*(c(R1,pool_int)-c(x11R,x_int1))/c(R1,pool_int)^2)*((1-(1-1/(2*rep(Ne,each=(ng-1))))^(c(rep(sort(gen)[2]-ming,each=nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],nreps)))))}
else { sigd_mat <- (cbind(x11R,x_int1)*(cbind(R1,pool_int)-cbind(x11R,x_int1))/cbind(R1,pool_int)^2)*(matrix(rep(1-(1-1/(2*rep(Ne,each=(ng-1))))^(c(rep(sort(gen)[2]-ming,each=nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],nreps))),nrow(R1)),nrow = nrow(R1), byrow = TRUE)) }
}
if (TA==TRUE) {
if (nrow(freq)==1) { sigd_mat <- (c(x11R,x_int1)*(c(R1,pool_int)-c(x11R,x_int1))/c(R1,pool_int)^2)*(c(rep(sort(gen)[2]-ming,each=nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],nreps)))/((2*rep(Ne,each=(ng-1))))}
else { sigd_mat <- (cbind(x11R,x_int1)*(cbind(R1,pool_int)-cbind(x11R,x_int1))/cbind(R1,pool_int)^2)*(matrix(rep(c(rep(sort(gen)[2]-ming,each=nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],nreps)),nrow(R1))/((2*rep(Ne,each=(ng-1)))),nrow = nrow(R1), byrow = TRUE)) }
}
} else {
if (TA==FALSE){
if (nrow(freq)==1) {sigd_mat <- (c(x11R,x_int1)*(c(R1,pool_int)-c(x11R,x_int1))/c(R1,pool_int)^2)*((1-(1-1/(2*rep(Ne,ng-1)))^(c(rep(sort(gen)[2]-ming,nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],each=nreps)))))}
else { sigd_mat <- (cbind(x11R,x_int1)*(cbind(R1,pool_int)-cbind(x11R,x_int1))/cbind(R1,pool_int)^2)*(matrix(rep(1-(1-1/(2*rep(Ne,ng-1)))^(c(rep(sort(gen)[2]-ming,nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],each=nreps))),nrow(R1)),nrow = nrow(R1), byrow = TRUE)) }
}
if (TA==TRUE) {
if (nrow(freq)==1) { sigd_mat <- (c(x11R,x_int1)*(c(R1,pool_int)-c(x11R,x_int1))/c(R1,pool_int)^2)*(c(rep(sort(gen)[2]-ming,nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],each=nreps)))/((2*rep(Ne,ng-1)))}
else { sigd_mat <- (cbind(x11R,x_int1)*(cbind(R1,pool_int)-cbind(x11R,x_int1))/cbind(R1,pool_int)^2)*(matrix(rep(c(rep(sort(gen)[2]-ming,nreps),rep(sort(gen)[-c(1,2)]-sort(gen)[-c(1,ng)],each=nreps)),nrow(R1))/((2*rep(Ne,ng-1))),nrow = nrow(R1), byrow = TRUE)) }
}
}
for (r in unique(repl)){
if (is.vector(sigd_mat)==TRUE) {sigd <- c(sigd, sum(sigd_mat[c(popInfo$repl[popInfo$gen==ming]==r,popInfo$repl[!(popInfo$gen %in% c(ming, maxg))]==r)]))}
else {sigd <- cbind(sigd,rowSums(sigd_mat[,c(popInfo$repl[popInfo$gen==ming]==r,popInfo$repl[!(popInfo$gen %in% c(ming, maxg))]==r)]))}
}
}
Ep2 <- (x11R/R1 + x21R/R2)/2
if(MeanStart==FALSE){Ep2 <- x11R/R1}
if (nrow(freq)==1) {stat <- sum(x11R - R1*x_1/n)^2/sum((R2/n)^2*(x11R*x12R/R1*(1+(R1-1)/x1)) + (R1/n)^2*(R2*Ep2-R2*Ep2^2+(R2*(R2-1)/x2)*(Ep2*(1-Ep2)+(x2-1)*(sigd)))) }
else { stat <- rowSums(x11R - R1*x_1/n)^2/rowSums((R2/n)^2*(x11R*x12R/R1*(1+(R1-1)/x1)) + (R1/n)^2*(R2*Ep2-R2*Ep2^2+(R2*(R2-1)/matrix(rep(x2,nrow(R2)),nrow=nrow(R2)))*(Ep2*(1-Ep2)+(matrix(rep(x2-1,nrow(Ep2)),nrow = nrow(Ep2)))*(sigd)))) }
} else {
stop("The number of populations (", npop, ") has to be a multiple of the length of 'poolSize' (", length(poolSize), ").")
}
}
# convert summary statistic to p-value and return it
stat[mask>0] <- NA
res <- pchisq(stat, df=1, lower.tail=FALSE)
if(!RetVal==0){
if(RetVal==1){
res <- stat
}else if(RetVal==2){
res <- cbind(stat,pchisq(stat, df=1, lower.tail=FALSE))
colnames(res) <- c("test_statistic", "p.value")
}
}
return(res)
}
|
### R code from vignette source 'landscape_simulation-knitr.Rnw'
# NOTE: this script is auto-extracted from the vignette's .Rnw source; edit
# the vignette, not this file. The chunks below use stochastic simulation,
# so exact results vary between runs.
###################################################
### code chunk number 1: landscape_simulation-knitr.Rnw:23-28
###################################################
# Generate a random landscape graph: a 1000-unit map with 70 patches of mean
# area 0.5 (SD 0.2); 'disp' is presumably the dispersal distance used for
# patch connectivity (see ?rland.graph). plotG = TRUE draws the result.
library(MetaLandSim)
rl <- rland.graph(mapsize = 1000, dist_m = 60,
areaM = 0.5, areaSD = 0.2, Npatch =70,
disp = 100, plotG = TRUE)
###################################################
### code chunk number 2: landscape_simulation-knitr.Rnw:33-40
###################################################
library(MetaLandSim)
#The occupation of a landscape is simulated by:
# Occupy 50% of the patches (method = "percentage", parm = 50) with no
# directional bias (nsew = "none").
sp_t0 <- species.graph(rl=rl, method="percentage", parm=50,
nsew="none", plotG=TRUE)
names(sp_t0)
###################################################
### code chunk number 3: landscape_simulation-knitr.Rnw:46-65
###################################################
# Advance the occupied landscape one step with spom() — a stochastic patch
# occupancy model — using the species parameters shipped in 'param1'; the
# "op1" choices select the kernel/connectivity/colonization/extinction
# options documented in ?spom.
data(param1)
sp_t1 <- spom(
sp_t0,
kern="op1",
conn="op1",
colnz="op1",
ext="op1",
param_df=param1,
beta1=NULL,
b=1,
c1=NULL,
c2=NULL,
z=NULL,
R=NULL
)
#Which has the following elements:
names(sp_t1)
###################################################
### code chunk number 4: landscape_simulation-knitr.Rnw:74-109
###################################################
#Loading species parameters
data(param1)
#Simulating occupation in dynamic landscape
# Run 2 full simulations (iter = 2) of 100 time steps each (span = 100) on
# freshly generated landscapes; plotting is suppressed (graph = FALSE).
it1 <- iterate.graph(
iter = 2,
mapsize = 1000,
dist_m = 30,
areaM = 0.5,
areaSD= 0.1,
Npatch = 200,
disp = 800,
span = 100,
par1 = "stoc",
par2 = 2,
par3 = 2,
method = "percentage",
parm = 50,
nsew = "none",
succ = "none",
param_df = param1,
kern = "op1",
conn = "op1",
colnz = "op1",
ext = "op1",
b = 1,
graph = FALSE
)
#This file is composed by the following elements:
names(it1)
| /inst/doc/landscape_simulation-knitr.R | no_license | cran/MetaLandSim | R | false | false | 1,887 | r | ### R code from vignette source 'landscape_simulation-knitr.Rnw'
###################################################
### code chunk number 1: landscape_simulation-knitr.Rnw:23-28
###################################################
library(MetaLandSim)
# Create a random landscape object `rl`; reused by the later chunks, so the
# chunks must run in order.  plotG = TRUE also draws the landscape.
rl <- rland.graph(mapsize = 1000, dist_m = 60,
areaM = 0.5, areaSD = 0.2, Npatch =70,
disp = 100, plotG = TRUE)
###################################################
### code chunk number 2: landscape_simulation-knitr.Rnw:33-40
###################################################
library(MetaLandSim)
#The occupation of a landscape is simulated by:
# method = "percentage" with parm = 50 presumably occupies 50% of the
# patches -- confirm against ?species.graph.
sp_t0 <- species.graph(rl=rl, method="percentage", parm=50,
nsew="none", plotG=TRUE)
names(sp_t0)
###################################################
### code chunk number 3: landscape_simulation-knitr.Rnw:46-65
###################################################
# `param1` ships with MetaLandSim; it feeds the `param_df` argument below.
data(param1)
# One time step of the stochastic patch occupancy model on the time-0
# occupation pattern; "op1" selects predefined model options (?spom).
sp_t1 <- spom(
sp_t0,
kern="op1",
conn="op1",
colnz="op1",
ext="op1",
param_df=param1,
beta1=NULL,
b=1,
c1=NULL,
c2=NULL,
z=NULL,
R=NULL
)
#Which has the following elements:
names(sp_t1)
###################################################
### code chunk number 4: landscape_simulation-knitr.Rnw:74-109
###################################################
#Loading species parameters
data(param1)
#Simulating occupation in dynamic landscape
# iterate.graph() regenerates the landscape internally; iter = 2 keeps the
# vignette run short.
it1 <- iterate.graph(
iter = 2,
mapsize = 1000,
dist_m = 30,
areaM = 0.5,
areaSD= 0.1,
Npatch = 200,
disp = 800,
span = 100,
par1 = "stoc",
par2 = 2,
par3 = 2,
method = "percentage",
parm = 50,
nsew = "none",
succ = "none",
param_df = param1,
kern = "op1",
conn = "op1",
colnz = "op1",
ext = "op1",
b = 1,
graph = FALSE
)
#This file is composed by the following elements:
names(it1)
|
with(af1c5edee200f4273b7460be6c765d341, {ROOT <- 'D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/80bb2a25-ac5d-47d0-abfc-b3f3811f0936';source("D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/R/Recommendations/advanced_federation_blend.r");a2Hrpdwy3col1<- as.character(FRAME878836$location);linkaIKKWY <- data.table("col1"=c("null"), "col2"=c("null")); linkaIKKWY <- unique(linkaIKKWY);aZn9P2vsK<- curate(a2Hrpdwy3col1,linkaIKKWY);aZn9P2vsK <- as.data.table(aZn9P2vsK);names(aZn9P2vsK)<-"asYm4Fj5G";FRAME878836 <- cbind(FRAME878836,aZn9P2vsK);FRAME878836 <- FRAME878836[,-c("location")];colnames(FRAME878836)[colnames(FRAME878836)=="asYm4Fj5G"] <- "location";rm(aZn9P2vsK,linkaIKKWY,a2Hrpdwy3col1,a2Hrpdwy3, best_match, best_match_nonzero, best_match_zero, blend, curate, self_match );}); | /80bb2a25-ac5d-47d0-abfc-b3f3811f0936/R/Temp/ayrOxjGcuzayj.R | no_license | ayanmanna8/test | R | false | false | 850 | r | with(af1c5edee200f4273b7460be6c765d341, {ROOT <- 'D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/80bb2a25-ac5d-47d0-abfc-b3f3811f0936';source("D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/R/Recommendations/advanced_federation_blend.r");a2Hrpdwy3col1<- as.character(FRAME878836$location);linkaIKKWY <- data.table("col1"=c("null"), "col2"=c("null")); linkaIKKWY <- unique(linkaIKKWY);aZn9P2vsK<- curate(a2Hrpdwy3col1,linkaIKKWY);aZn9P2vsK <- as.data.table(aZn9P2vsK);names(aZn9P2vsK)<-"asYm4Fj5G";FRAME878836 <- cbind(FRAME878836,aZn9P2vsK);FRAME878836 <- FRAME878836[,-c("location")];colnames(FRAME878836)[colnames(FRAME878836)=="asYm4Fj5G"] <- "location";rm(aZn9P2vsK,linkaIKKWY,a2Hrpdwy3col1,a2Hrpdwy3, best_match, best_match_nonzero, best_match_zero, blend, curate, self_match );}); |
# Load the uEMA battery log for participant "uema01".
# NOTE(review): machine-specific absolute Windows path -- this script only
# runs on the original author's machine; consider a relative path.
batteryFile <- read.csv("C:/Users/Dharam/Downloads/microEMA/StudyFiles/Responses_uEMA/Battery/uema01_bat/battery.csv", header = TRUE, sep = ",")
# Coerce the columns read from CSV into usable types.
batteryFile$BATTERY_LEVEL <- as.numeric(batteryFile$BATTERY_LEVEL)
batteryFile$START_TIME <- as.POSIXct(batteryFile$START_TIME, format = "%Y-%m-%d %H:%M:%OS")
library(plotly)
# Scatter plot of battery level over time, colored by charging state.
# (The name "bateryPlot" is a typo for "batteryPlot"; kept as-is.)
bateryPlot <- plot_ly(batteryFile, x = ~START_TIME, y=~BATTERY_LEVEL, color = ~BATTERY_CHARGING, type = "scatter", mode = "markers")
# NOTE(review): responsePlot and responseTimePlot are not defined in this
# script; they must already exist in the session (presumably created by a
# companion script) or this call fails -- TODO confirm.
subplot(bateryPlot, responsePlot, responseTimePlot, shareX = TRUE, nrows = 3)
| /readBatteryFile.R | no_license | adityaponnada/Battery-Performance-Visualization | R | false | false | 537 | r | batteryFile <- read.csv("C:/Users/Dharam/Downloads/microEMA/StudyFiles/Responses_uEMA/Battery/uema01_bat/battery.csv", header = TRUE, sep = ",")
# Coerce the CSV columns into usable types (`batteryFile` is created by the
# read.csv() call on the preceding line).
batteryFile$BATTERY_LEVEL <- as.numeric(batteryFile$BATTERY_LEVEL)
batteryFile$START_TIME <- as.POSIXct(batteryFile$START_TIME, format = "%Y-%m-%d %H:%M:%OS")
library(plotly)
# Scatter plot of battery level over time, colored by charging state.
bateryPlot <- plot_ly(batteryFile, x = ~START_TIME, y=~BATTERY_LEVEL, color = ~BATTERY_CHARGING, type = "scatter", mode = "markers")
# NOTE(review): responsePlot / responseTimePlot are not defined in this
# script; they must come from a companion script -- TODO confirm.
subplot(bateryPlot, responsePlot, responseTimePlot, shareX = TRUE, nrows = 3)
|
### Terrence D. Jorgensen
### Last updated: 4 April 2017
### semTools functions for Nesting and Equivalence Testing
## -----------------
## Class and Methods
## -----------------
#' Class For the Result of Nesting and Equivalence Testing
#'
#' This class contains the results of nesting and equivalence testing among
#' multiple models
#'
#'
#' @name Net-class
#' @aliases Net-class show,Net-method summary,Net-method
#' @docType class
#' @slot test Logical \code{matrix} indicating nesting/equivalence among models
#' @slot df The degrees of freedom of tested models
#' @section Objects from the Class: Objects can be created via the
#' \code{\link{net}} function.
#' @return The \code{show} method displays the logical matrix of test results,
#' and the \code{summary} method is used to describe the results in narrative.
#' In either case, the original \code{object} is invisibly returned.
#' @author Terrence D. Jorgensen (University of Amsterdam;
#' \email{TJorgensen314@@gmail.com})
#' @seealso \code{\link{net}}
#' @examples
#'
#' # See the example in the net function.
#'
## S4 class holding NET results: `test` is a logical matrix of pairwise
## nesting/equivalence verdicts and `df` holds each model's degrees of
## freedom.  The `slots =` form replaces the soft-deprecated
## representation() call; the resulting class is identical.
setClass("Net", slots = c(test = "matrix", df = "vector"))
#' @rdname Net-class
#' @aliases show,Net-method
## show() method for Net: print the lower triangle of the pairwise
## nesting/equivalence matrix together with a legend explaining how to read
## it.  The object is returned invisibly, per R convention for show/print.
setMethod("show", "Net",
function(object) {
if (length(object@test)) {
## Work on a plain character matrix; blank out the diagonal and upper
## triangle, which carry no information beyond what the legend explains.
m <- as.matrix(unclass(object@test))
m[upper.tri(m, diag = TRUE)] <- ""
## The legend below is part of the printed output -- do not reflow it.
cat("
If cell [R, C] is TRUE, the model in row R is nested within column C.
If the models also have the same degrees of freedom, they are equivalent.
NA indicates the model in column C did not converge when fit to the
implied means and covariance matrix from the model in row R.
The hidden diagonal is TRUE because any model is equivalent to itself.
The upper triangle is hidden because for models with the same degrees
of freedom, cell [C, R] == cell [R, C]. For all models with different
degrees of freedom, the upper diagonal is all FALSE because models with
fewer degrees of freedom (i.e., more parameters) cannot be nested
within models with more degrees of freedom (i.e., fewer parameters).
\n")
print(m, quote = FALSE)
} else {
## Zero-length test matrix: mimic base R's "type(0)" display.
cat(data.class(object@test), "(0)\n", sep = "")
}
invisible(object)
})
#' @rdname Net-class
#' @aliases summary,Net-method
## summary() method for Net: narrate every TRUE cell of the lower triangle
## of the test matrix as a sentence, distinguishing equivalence (equal df)
## from strict nesting.  Returns the object invisibly, like show().
setMethod("summary", "Net",
function(object) {
modelDf <- object@df
testMat <- object@test
modelNames <- colnames(testMat)
for (row in 2:nrow(testMat)) {
for (col in (row - 1):1) {
verdict <- testMat[row, col]
## Skip non-converged (NA) and non-nested (FALSE) pairs in one guard.
if (is.na(verdict) || !verdict) next
## Nesting with equal df means the models are observationally equivalent.
relation <- if (identical(modelDf[row], modelDf[col])) "equivalent to" else "nested within"
cat("Model \"", modelNames[row], "\" is ", relation, " model \"", modelNames[col], "\"\n", sep = "")
}
}
invisible(object)
})
## --------------------
## Constructor Function
## --------------------
#' Nesting and Equivalence Testing
#'
#' This test examines whether models are nested or equivalent based on Bentler
#' and Satorra's (2010) procedure.
#'
#' The concept of nesting/equivalence should be the same regardless of
#' estimation method. However, the particular method of testing
#' nesting/equivalence (as described in Bentler & Satorra, 2010) employed by
#' the net function analyzes summary statistics (model-implied means and
#' covariance matrices, not raw data). In the case of robust methods like MLR,
#' the raw data is only utilized for the robust adjustment to SE and chi-sq,
#' and the net function only checks the unadjusted chi-sq for the purposes of
#' testing nesting/equivalence. This method does not apply to models that
#' estimate thresholds for categorical data, so an error message will be issued
#' if such a model is provided.
#'
#' @param \dots The \code{lavaan} objects used for test of nesting and
#' equivalence
#' @param crit The upper-bound criterion for testing the equivalence of models.
#' Models are considered nested (or equivalent) if the difference between their
#' chi-squared fit statistics is less than this criterion.
#' @return The \linkS4class{Net} object representing the outputs for nesting
#' and equivalent testing, including a logical matrix of test results and a
#' vector of degrees of freedom for each model.
#' @author Terrence D. Jorgensen (University of Amsterdam;
#' \email{TJorgensen314@@gmail.com})
#' @references Bentler, P. M., & Satorra, A. (2010). Testing model nesting and
#' equivalence. \emph{Psychological Methods, 15}(2), 111-123.
#' doi:10.1037/a0019625
#' @examples
#'
#' \dontrun{
#' m1 <- ' visual =~ x1 + x2 + x3
#' textual =~ x4 + x5 + x6
#' speed =~ x7 + x8 + x9 '
#'
#'
#' m2 <- ' f1 =~ x1 + x2 + x3 + x4
#' f2 =~ x5 + x6 + x7 + x8 + x9 '
#'
#' m3 <- ' visual =~ x1 + x2 + x3
#' textual =~ eq*x4 + eq*x5 + eq*x6
#' speed =~ x7 + x8 + x9 '
#'
#' fit1 <- cfa(m1, data = HolzingerSwineford1939)
#' fit1a <- cfa(m1, data = HolzingerSwineford1939, std.lv = TRUE) # Equivalent to fit1
#' fit2 <- cfa(m2, data = HolzingerSwineford1939) # Not equivalent to or nested in fit1
#' fit3 <- cfa(m3, data = HolzingerSwineford1939) # Nested in fit1 and fit1a
#'
#' tests <- net(fit1, fit1a, fit2, fit3)
#' tests
#' summary(tests)
#' }
#'
net <- function(..., crit = .0001) {
## Collect the fitted objects passed via ...
fitList <- list(...)
nFits <- length(fitList)
## Capture the argument expressions for labels and error messages.
## (deparse() can return more than one string for long calls, so the
## type-unstable sapply() is deliberately kept for this one case.)
dotNames <- sapply(as.list(substitute(list(...)))[-1], deparse)
## Validate inputs: at least two models, and every one a lavaan fit.
## (The original indexed a 1x1 matrix out of bounds for a single model.)
if (nFits < 2L) stop("Supply at least two fitted lavaan models to compare.")
## inherits() is the correct class test; class() may return a vector,
## making the original `sapply(fitList, class) != "lavaan"` unreliable.
notLavaan <- !vapply(fitList, inherits, logical(1), what = "lavaan")
if (any(notLavaan)) {
stop(paste("The following arguments are not fitted lavaan objects:\n",
paste(dotNames[notLavaan], collapse = "\t")))
}
## The procedure analyzes model-implied moments, which does not apply to
## categorical (threshold) models.
catMod <- vapply(fitList, function(x) lavInspect(x, "options")$categorical,
logical(1))
if (any(catMod)) stop("This method only applies to continuous outcomes.")
## Degrees of freedom of each model (used for ordering and labeling).
DFs <- vapply(fitList, function(x) lavInspect(x, "fit")["df"], numeric(1))
## Label each model by its supplied name (or argument expression) plus df.
fitNames <- names(fitList)
if (is.null(fitNames)) {
fitNames <- dotNames
} else {
noName <- which(fitNames == "")
fitNames[noName] <- dotNames[noName]
}
names(fitList) <- paste0(fitNames, " (df = ", DFs, ")")
## Sort models by increasing df so each model is only tested against the
## models that precede it in the ordering.
ord <- order(DFs)
fitList <- fitList[ord]
fitNames <- fitNames[ord]
orderedDFs <- DFs[ord]
## Pairwise results; the diagonal is TRUE because every model is
## equivalent to itself.
nestMat <- matrix(FALSE, nFits, nFits,
dimnames = list(names(fitList), fitNames))
diag(nestMat) <- TRUE
## Test each model (row) against every model with fewer or equal df (col).
for (R in 2:nrow(nestMat)) {
for (C in (R - 1):1) {
nestMat[R, C] <- x.within.y(x = fitList[[R]], y = fitList[[C]], crit = crit)
## Nesting with equal df is equivalence, which is symmetric.
if (identical(orderedDFs[R], orderedDFs[C])) nestMat[C, R] <- nestMat[R, C]
if (C == 1) next  # no earlier column left to propagate into
## Scalar conditions use ||/&& (the original's |/& worked only by luck).
if (is.na(nestMat[R, C]) || is.na(nestMat[R - 1, C - 1])) next
## Transitivity shortcut from the original: R within C and (R-1) within
## (C-1) implies R within (C-1).  NOTE(review): the next inner iteration
## recomputes nestMat[R, C - 1] anyway, so this only pre-fills the cell;
## kept for fidelity with the published procedure.
if (nestMat[R, C] && nestMat[R - 1, C - 1]) {
nestMat[R, C - 1] <- TRUE
next
}
}
}
new("Net", test = nestMat, df = orderedDFs)
}
## --------------------------------------------------------------------
## Hidden Function to test whether model "x" is nested within model "y"
## --------------------------------------------------------------------
## Hidden utility: test whether model x is nested within (or equivalent to)
## model y by refitting y to the model-implied moments of x (Bentler &
## Satorra, 2010).  Returns a named TRUE/FALSE ("Nested" or "Equivalent"),
## or NA when the refit does not converge or errors.
##
## `crit` is now a required argument: the original default `crit = crit`
## was self-referential and would error if any caller ever relied on it,
## so no working caller is affected by removing it.
x.within.y <- function(x, y, crit) {
if (length(c(lavaan::lavNames(x, "ov.ord"), lavaan::lavNames(y, "ov.ord"))))
stop("The net() function is not available for categorical-data estimators.")
## Exogenous covariates with fixed.x = TRUE are not supported by the
## moment-based refitting procedure.
exoX <- lavInspect(x, "options")$fixed.x && length(lavaan::lavNames(x, "ov.x")) > 0
exoY <- lavInspect(y, "options")$fixed.x && length(lavaan::lavNames(y, "ov.x")) > 0
if (exoX || exoY) {
stop(c("The net() function does not work with exogenous variables.\n",
"Fit the model again with 'fixed.x = FALSE'"))
}
## Both models must be defined on the same observed variables.
Xnames <- lavaan::lavNames(x)
Ynames <- lavaan::lavNames(y)
if (!identical(sort(Xnames), sort(Ynames)))
stop("Models do not contain the same variables")
## A model with fewer df (more parameters) cannot be nested in one with more.
if (lavInspect(x, "fit")["df"] < lavInspect(y, "fit")["df"])
stop("x cannot be nested within y because y is more restricted than x")
## Model-implied moments of x serve as the "data" for refitting y.
Sigma <- lavInspect(x, "cov.ov")
Mu <- lavInspect(x, "mean.ov")
N <- lavInspect(x, "nobs")
## BUG FIX: the original wrapped the *assignment* inside try(), so a failed
## update() left `newFit` undefined and the converged-check crashed with
## "object not found".  Capture the try() result and check it explicitly.
newFit <- suppressWarnings(try(lavaan::update(y, data = NULL,
sample.cov = Sigma,
sample.mean = Mu,
sample.nobs = N),
silent = TRUE))
if (inherits(newFit, "try-error") || !lavInspect(newFit, "converged")) return(NA)
## A (near-)zero chi-squared means y reproduces x's implied moments.
result <- lavInspect(newFit, "fit")["chisq"] < crit
names(result) <- NULL
if (lavInspect(x, "fit")["df"] == lavInspect(y, "fit")["df"])
return(c(Equivalent = result))
c(Nested = result)
}
| /semTools/R/NET.R | no_license | jknowles/semTools | R | false | false | 10,156 | r | ### Terrence D. Jorgensen
### Last updated: 4 April 2017
### semTools functions for Nesting and Equivalence Testing
## -----------------
## Class and Methods
## -----------------
#' Class For the Result of Nesting and Equivalence Testing
#'
#' This class contains the results of nesting and equivalence testing among
#' multiple models
#'
#'
#' @name Net-class
#' @aliases Net-class show,Net-method summary,Net-method
#' @docType class
#' @slot test Logical \code{matrix} indicating nesting/equivalence among models
#' @slot df The degrees of freedom of tested models
#' @section Objects from the Class: Objects can be created via the
#' \code{\link{net}} function.
#' @return The \code{show} method displays the logical matrix of test results,
#' and the \code{summary} method is used to describe the results in narrative.
#' In either case, the original \code{object} is invisibly returned.
#' @author Terrence D. Jorgensen (University of Amsterdam;
#' \email{TJorgensen314@@gmail.com})
#' @seealso \code{\link{net}}
#' @examples
#'
#' # See the example in the net function.
#'
## S4 class holding NET results: `test` is a logical matrix of pairwise
## nesting/equivalence verdicts and `df` holds each model's degrees of
## freedom.  The `slots =` form replaces the soft-deprecated
## representation() call; the resulting class is identical.
setClass("Net", slots = c(test = "matrix", df = "vector"))
#' @rdname Net-class
#' @aliases show,Net-method
## show() method for Net: print the lower triangle of the pairwise
## nesting/equivalence matrix together with a legend explaining how to read
## it.  The object is returned invisibly, per R convention for show/print.
setMethod("show", "Net",
function(object) {
if (length(object@test)) {
## Work on a plain character matrix; blank out the diagonal and upper
## triangle, which carry no information beyond what the legend explains.
m <- as.matrix(unclass(object@test))
m[upper.tri(m, diag = TRUE)] <- ""
## The legend below is part of the printed output -- do not reflow it.
cat("
If cell [R, C] is TRUE, the model in row R is nested within column C.
If the models also have the same degrees of freedom, they are equivalent.
NA indicates the model in column C did not converge when fit to the
implied means and covariance matrix from the model in row R.
The hidden diagonal is TRUE because any model is equivalent to itself.
The upper triangle is hidden because for models with the same degrees
of freedom, cell [C, R] == cell [R, C]. For all models with different
degrees of freedom, the upper diagonal is all FALSE because models with
fewer degrees of freedom (i.e., more parameters) cannot be nested
within models with more degrees of freedom (i.e., fewer parameters).
\n")
print(m, quote = FALSE)
} else {
## Zero-length test matrix: mimic base R's "type(0)" display.
cat(data.class(object@test), "(0)\n", sep = "")
}
invisible(object)
})
#' @rdname Net-class
#' @aliases summary,Net-method
## summary() method for Net: narrate every TRUE cell of the lower triangle
## of the test matrix as a sentence, distinguishing equivalence (equal df)
## from strict nesting.  Returns the object invisibly, like show().
setMethod("summary", "Net",
function(object) {
modelDf <- object@df
testMat <- object@test
modelNames <- colnames(testMat)
for (row in 2:nrow(testMat)) {
for (col in (row - 1):1) {
verdict <- testMat[row, col]
## Skip non-converged (NA) and non-nested (FALSE) pairs in one guard.
if (is.na(verdict) || !verdict) next
## Nesting with equal df means the models are observationally equivalent.
relation <- if (identical(modelDf[row], modelDf[col])) "equivalent to" else "nested within"
cat("Model \"", modelNames[row], "\" is ", relation, " model \"", modelNames[col], "\"\n", sep = "")
}
}
invisible(object)
})
## --------------------
## Constructor Function
## --------------------
#' Nesting and Equivalence Testing
#'
#' This test examines whether models are nested or equivalent based on Bentler
#' and Satorra's (2010) procedure.
#'
#' The concept of nesting/equivalence should be the same regardless of
#' estimation method. However, the particular method of testing
#' nesting/equivalence (as described in Bentler & Satorra, 2010) employed by
#' the net function analyzes summary statistics (model-implied means and
#' covariance matrices, not raw data). In the case of robust methods like MLR,
#' the raw data is only utilized for the robust adjustment to SE and chi-sq,
#' and the net function only checks the unadjusted chi-sq for the purposes of
#' testing nesting/equivalence. This method does not apply to models that
#' estimate thresholds for categorical data, so an error message will be issued
#' if such a model is provided.
#'
#' @param \dots The \code{lavaan} objects used for test of nesting and
#' equivalence
#' @param crit The upper-bound criterion for testing the equivalence of models.
#' Models are considered nested (or equivalent) if the difference between their
#' chi-squared fit statistics is less than this criterion.
#' @return The \linkS4class{Net} object representing the outputs for nesting
#' and equivalent testing, including a logical matrix of test results and a
#' vector of degrees of freedom for each model.
#' @author Terrence D. Jorgensen (University of Amsterdam;
#' \email{TJorgensen314@@gmail.com})
#' @references Bentler, P. M., & Satorra, A. (2010). Testing model nesting and
#' equivalence. \emph{Psychological Methods, 15}(2), 111-123.
#' doi:10.1037/a0019625
#' @examples
#'
#' \dontrun{
#' m1 <- ' visual =~ x1 + x2 + x3
#' textual =~ x4 + x5 + x6
#' speed =~ x7 + x8 + x9 '
#'
#'
#' m2 <- ' f1 =~ x1 + x2 + x3 + x4
#' f2 =~ x5 + x6 + x7 + x8 + x9 '
#'
#' m3 <- ' visual =~ x1 + x2 + x3
#' textual =~ eq*x4 + eq*x5 + eq*x6
#' speed =~ x7 + x8 + x9 '
#'
#' fit1 <- cfa(m1, data = HolzingerSwineford1939)
#' fit1a <- cfa(m1, data = HolzingerSwineford1939, std.lv = TRUE) # Equivalent to fit1
#' fit2 <- cfa(m2, data = HolzingerSwineford1939) # Not equivalent to or nested in fit1
#' fit3 <- cfa(m3, data = HolzingerSwineford1939) # Nested in fit1 and fit1a
#'
#' tests <- net(fit1, fit1a, fit2, fit3)
#' tests
#' summary(tests)
#' }
#'
net <- function(..., crit = .0001) {
## Collect the fitted objects passed via ...
fitList <- list(...)
nFits <- length(fitList)
## Capture the argument expressions for labels and error messages.
## (deparse() can return more than one string for long calls, so the
## type-unstable sapply() is deliberately kept for this one case.)
dotNames <- sapply(as.list(substitute(list(...)))[-1], deparse)
## Validate inputs: at least two models, and every one a lavaan fit.
## (The original indexed a 1x1 matrix out of bounds for a single model.)
if (nFits < 2L) stop("Supply at least two fitted lavaan models to compare.")
## inherits() is the correct class test; class() may return a vector,
## making the original `sapply(fitList, class) != "lavaan"` unreliable.
notLavaan <- !vapply(fitList, inherits, logical(1), what = "lavaan")
if (any(notLavaan)) {
stop(paste("The following arguments are not fitted lavaan objects:\n",
paste(dotNames[notLavaan], collapse = "\t")))
}
## The procedure analyzes model-implied moments, which does not apply to
## categorical (threshold) models.
catMod <- vapply(fitList, function(x) lavInspect(x, "options")$categorical,
logical(1))
if (any(catMod)) stop("This method only applies to continuous outcomes.")
## Degrees of freedom of each model (used for ordering and labeling).
DFs <- vapply(fitList, function(x) lavInspect(x, "fit")["df"], numeric(1))
## Label each model by its supplied name (or argument expression) plus df.
fitNames <- names(fitList)
if (is.null(fitNames)) {
fitNames <- dotNames
} else {
noName <- which(fitNames == "")
fitNames[noName] <- dotNames[noName]
}
names(fitList) <- paste0(fitNames, " (df = ", DFs, ")")
## Sort models by increasing df so each model is only tested against the
## models that precede it in the ordering.
ord <- order(DFs)
fitList <- fitList[ord]
fitNames <- fitNames[ord]
orderedDFs <- DFs[ord]
## Pairwise results; the diagonal is TRUE because every model is
## equivalent to itself.
nestMat <- matrix(FALSE, nFits, nFits,
dimnames = list(names(fitList), fitNames))
diag(nestMat) <- TRUE
## Test each model (row) against every model with fewer or equal df (col).
for (R in 2:nrow(nestMat)) {
for (C in (R - 1):1) {
nestMat[R, C] <- x.within.y(x = fitList[[R]], y = fitList[[C]], crit = crit)
## Nesting with equal df is equivalence, which is symmetric.
if (identical(orderedDFs[R], orderedDFs[C])) nestMat[C, R] <- nestMat[R, C]
if (C == 1) next  # no earlier column left to propagate into
## Scalar conditions use ||/&& (the original's |/& worked only by luck).
if (is.na(nestMat[R, C]) || is.na(nestMat[R - 1, C - 1])) next
## Transitivity shortcut from the original: R within C and (R-1) within
## (C-1) implies R within (C-1).  NOTE(review): the next inner iteration
## recomputes nestMat[R, C - 1] anyway, so this only pre-fills the cell;
## kept for fidelity with the published procedure.
if (nestMat[R, C] && nestMat[R - 1, C - 1]) {
nestMat[R, C - 1] <- TRUE
next
}
}
}
new("Net", test = nestMat, df = orderedDFs)
}
## --------------------------------------------------------------------
## Hidden Function to test whether model "x" is nested within model "y"
## --------------------------------------------------------------------
## Hidden utility: test whether model x is nested within (or equivalent to)
## model y by refitting y to the model-implied moments of x (Bentler &
## Satorra, 2010).  Returns a named TRUE/FALSE ("Nested" or "Equivalent"),
## or NA when the refit does not converge or errors.
##
## `crit` is now a required argument: the original default `crit = crit`
## was self-referential and would error if any caller ever relied on it,
## so no working caller is affected by removing it.
x.within.y <- function(x, y, crit) {
if (length(c(lavaan::lavNames(x, "ov.ord"), lavaan::lavNames(y, "ov.ord"))))
stop("The net() function is not available for categorical-data estimators.")
## Exogenous covariates with fixed.x = TRUE are not supported by the
## moment-based refitting procedure.
exoX <- lavInspect(x, "options")$fixed.x && length(lavaan::lavNames(x, "ov.x")) > 0
exoY <- lavInspect(y, "options")$fixed.x && length(lavaan::lavNames(y, "ov.x")) > 0
if (exoX || exoY) {
stop(c("The net() function does not work with exogenous variables.\n",
"Fit the model again with 'fixed.x = FALSE'"))
}
## Both models must be defined on the same observed variables.
Xnames <- lavaan::lavNames(x)
Ynames <- lavaan::lavNames(y)
if (!identical(sort(Xnames), sort(Ynames)))
stop("Models do not contain the same variables")
## A model with fewer df (more parameters) cannot be nested in one with more.
if (lavInspect(x, "fit")["df"] < lavInspect(y, "fit")["df"])
stop("x cannot be nested within y because y is more restricted than x")
## Model-implied moments of x serve as the "data" for refitting y.
Sigma <- lavInspect(x, "cov.ov")
Mu <- lavInspect(x, "mean.ov")
N <- lavInspect(x, "nobs")
## BUG FIX: the original wrapped the *assignment* inside try(), so a failed
## update() left `newFit` undefined and the converged-check crashed with
## "object not found".  Capture the try() result and check it explicitly.
newFit <- suppressWarnings(try(lavaan::update(y, data = NULL,
sample.cov = Sigma,
sample.mean = Mu,
sample.nobs = N),
silent = TRUE))
if (inherits(newFit, "try-error") || !lavInspect(newFit, "converged")) return(NA)
## A (near-)zero chi-squared means y reproduces x's implied moments.
result <- lavInspect(newFit, "fit")["chisq"] < crit
names(result) <- NULL
if (lavInspect(x, "fit")["df"] == lavInspect(y, "fit")["df"])
return(c(Equivalent = result))
c(Nested = result)
}
|
# distribution = gl$multiple_comparison$visibility$uncorrected$distribution
# sdistribution = gl$multiple_comparison$visibility$uncorrected$sdistribution
# threshold = gl$multiple_comparison$visibility$slope$threshold
# aggr_FUN = sum
# bw = bw
# alternative = "two.sided"
#
## Cluster-mass statistic for the "slope binder" multiple-comparisons
## procedure (permuco extension).  Clusters are formed wherever EITHER the
## signal statistic (`distribution`) or the slope statistic
## (`sdistribution`) exceeds `threshold`, but each cluster's mass is
## aggregated (via `aggr_FUN`) over the signal statistic only.
##
## Assumed layout -- TODO confirm against permuco conventions:
##   distribution  : permutations x time-points matrix; row 1 is presumably
##                   the observed (non-permuted) statistic.
##   sdistribution : matching matrix of slope statistics.
##   threshold     : cluster-forming threshold (sign ignored via abs()).
##   aggr_FUN      : cluster-mass aggregation function (e.g. sum).
##   alternative   : only "two.sided" is implemented; any other value makes
##                   switch() return NULL and later code fail obscurely --
##                   NOTE(review): consider match.arg().
compute_clustermass_slopebinder <- function (distribution, sdistribution, threshold, aggr_FUN, alternative = "two.sided"){
switch(alternative,
two.sided = {
## Two-sided test: work with absolute statistics and threshold.
distribution <- abs(distribution)
sdistribution <- abs(sdistribution)
threshold <- abs(threshold)
selected <- (distribution > threshold)
sselected <- (sdistribution > threshold)
## A time point enters a cluster if either statistic is supra-threshold.
selected_join <- selected|sselected
## NOTE(review): `extreme` is defined but never used below; also uses the
## unsafe shorthand T for TRUE.
extreme = function(x) max(x, na.rm = T)
})
## Label contiguous supra-threshold runs per permutation: the difference
## marks each run's first column, cumsum() numbers the runs, and
## multiplying by the mask zeroes sub-threshold positions.
cl_join = (selected_join-cbind(0,selected_join[,-NCOL(selected_join), drop = F]))==1
cl_join = t(apply(cl_join,1,cumsum))*selected_join
## Restrict the joint cluster labels to points where each individual
## statistic is supra-threshold.  NOTE(review): `scl` is never used below.
cl = selected*cl_join
scl = sselected*cl_join
## Null distribution: for each permutation, the maximum cluster mass over
## its joint clusters (mass aggregated over the signal statistic only).
mass_distribution = sapply(1:(dim(selected_join)[1]),function(permi){
max(sapply(1:max(1,max(cl_join[permi,])),function(i_in_p){
aggr_FUN(c(distribution[permi,cl[permi,]==i_in_p]))}))})
## Observed cluster masses (row 1), one value per cluster.
mass_statistic = sapply(1:max(1,max(cl_join[1,])), function(i) {
aggr_FUN(c(distribution[1,cl[1,]==i]))
})
## Permutation p-value of each observed cluster against the max-mass null.
## NOTE(review): relies on the unexported permuco:::compute_pvalue.
pvalue = sapply(mass_statistic, function(mi) permuco:::compute_pvalue(stat = mi,
distribution = mass_distribution, alternative = "two.sided"))
## Spread per-cluster results back over time points (NA outside clusters;
## the +1 index maps cluster id 0 to the leading NA).
main = cbind(statistic =c(NA, mass_statistic)[cl[1, ] + 1],
pvalue = c(NA, pvalue)[cl[1, ] + 1],
cluster_id = cl[1, ])
## Split view: only the "mean" component is populated in this variant; the
## slope columns are intentionally NA/0.
statistic = cbind(mean = c(NA, mass_statistic)[cl[1,]+1],slope = rep(NA,dim(distribution)[2]))
pvalue = cbind(mean = c(NA, pvalue)[cl[1,]+1],slope = rep(NA,dim(distribution)[2]))
cluster_id = cbind(mean = cl[1,], slope = rep(0,dim(distribution)[2]))
main_split = list(statistic = statistic, pvalue = pvalue,
cluster_id = cluster_id)
out = list(main = main, main_split = main_split, distribution = mass_distribution, threshold = threshold)
return(out)
}
# compute_clustermass_slopebinder2 <- function (distribution, sdistribution, threshold, aggr_FUN, alternative = "two.sided"){
# switch(alternative,
# two.sided = {
# distribution <- abs(distribution)
# sdistribution <- abs(sdistribution)
# threshold <- abs(threshold)
# selected <- (distribution > threshold)
# sselected <- (sdistribution > threshold)
# selected_join <- selected|sselected
# extreme = function(x) max(x, na.rm = T)
# })
#
# cl_join = (selected_join-cbind(0,selected_join[,-NCOL(selected_join), drop = F]))==1
# cl_join = t(apply(cl_join,1,cumsum))*selected_join
#
# cl = selected*cl_join
# #scl = sselected*cl_join
# cl_join = t(sapply(1:(dim(selected_join)[1]),function(permi){
# cli = cl[permi,]
# ui = unique(cli)
# ui = ui[ui!=0]
# if(length(ui)==0){
# cli = rep(0,ncol(cl))}else{
# for(ii in 1:length(ui)){
# cli[cli==(ui[[ii]])]=ii
# }}
# cli}
# ))
#
# mass_distribution = sapply(1:(dim(selected_join)[1]),function(permi){
# max(sapply(1:max(1,max(cl_join[permi,])),function(i_in_p){
# aggr_FUN(c(distribution[permi,cl_join[permi,]==i_in_p]))}))})
#
# mass_statistic = sapply(1:max(1,max(cl_join[1,])), function(i) {
# aggr_FUN(c(distribution[1,cl[1,]==i]))
# })
#
# pvalue = sapply(mass_statistic, function(mi) permuco:::compute_pvalue(stat = mi,
# distribution = mass_distribution, alternative = "two.sided"))
#
#
# main = cbind(statistic =c(NA, mass_statistic)[cl_join[1, ] + 1],
# pvalue = c(NA, pvalue)[cl_join[1, ] + 1],
# cluster_id = cl_join[1, ])
#
# statistic = cbind(mean = c(NA, mass_statistic)[cl[1,]+1],slope = c(NA, mass_statistic)[scl[1,]+1])
# pvalue = cbind(mean = c(NA, pvalue)[cl[1,]+1],slope = c(NA, pvalue)[scl[1,]+1])
# cluster_id = cbind(mean = cl[1,], slope = scl[1,])
#
#
#
# main_split = list(statistic = statistic, pvalue = pvalue,
# cluster_id = cluster_id)
#
# out = list(main = main, main_split = main_split, distribution = mass_distribution, threshold = threshold)
# return(out)
# }
| /R/compute_clustermass_slopebinder.R | no_license | jaromilfrossard/permucoSlope | R | false | false | 4,412 | r | # distribution = gl$multiple_comparison$visibility$uncorrected$distribution
# sdistribution = gl$multiple_comparison$visibility$uncorrected$sdistribution
# threshold = gl$multiple_comparison$visibility$slope$threshold
# aggr_FUN = sum
# bw = bw
# alternative = "two.sided"
#
## Cluster-mass statistic for the "slope binder" multiple-comparisons
## procedure (permuco extension).  Clusters are formed wherever EITHER the
## signal statistic (`distribution`) or the slope statistic
## (`sdistribution`) exceeds `threshold`, but each cluster's mass is
## aggregated (via `aggr_FUN`) over the signal statistic only.
##
## Assumed layout -- TODO confirm against permuco conventions:
##   distribution  : permutations x time-points matrix; row 1 is presumably
##                   the observed (non-permuted) statistic.
##   sdistribution : matching matrix of slope statistics.
##   threshold     : cluster-forming threshold (sign ignored via abs()).
##   aggr_FUN      : cluster-mass aggregation function (e.g. sum).
##   alternative   : only "two.sided" is implemented; any other value makes
##                   switch() return NULL and later code fail obscurely --
##                   NOTE(review): consider match.arg().
compute_clustermass_slopebinder <- function (distribution, sdistribution, threshold, aggr_FUN, alternative = "two.sided"){
switch(alternative,
two.sided = {
## Two-sided test: work with absolute statistics and threshold.
distribution <- abs(distribution)
sdistribution <- abs(sdistribution)
threshold <- abs(threshold)
selected <- (distribution > threshold)
sselected <- (sdistribution > threshold)
## A time point enters a cluster if either statistic is supra-threshold.
selected_join <- selected|sselected
## NOTE(review): `extreme` is defined but never used below; also uses the
## unsafe shorthand T for TRUE.
extreme = function(x) max(x, na.rm = T)
})
## Label contiguous supra-threshold runs per permutation: the difference
## marks each run's first column, cumsum() numbers the runs, and
## multiplying by the mask zeroes sub-threshold positions.
cl_join = (selected_join-cbind(0,selected_join[,-NCOL(selected_join), drop = F]))==1
cl_join = t(apply(cl_join,1,cumsum))*selected_join
## Restrict the joint cluster labels to points where each individual
## statistic is supra-threshold.  NOTE(review): `scl` is never used below.
cl = selected*cl_join
scl = sselected*cl_join
## Null distribution: for each permutation, the maximum cluster mass over
## its joint clusters (mass aggregated over the signal statistic only).
mass_distribution = sapply(1:(dim(selected_join)[1]),function(permi){
max(sapply(1:max(1,max(cl_join[permi,])),function(i_in_p){
aggr_FUN(c(distribution[permi,cl[permi,]==i_in_p]))}))})
## Observed cluster masses (row 1), one value per cluster.
mass_statistic = sapply(1:max(1,max(cl_join[1,])), function(i) {
aggr_FUN(c(distribution[1,cl[1,]==i]))
})
## Permutation p-value of each observed cluster against the max-mass null.
## NOTE(review): relies on the unexported permuco:::compute_pvalue.
pvalue = sapply(mass_statistic, function(mi) permuco:::compute_pvalue(stat = mi,
distribution = mass_distribution, alternative = "two.sided"))
## Spread per-cluster results back over time points (NA outside clusters;
## the +1 index maps cluster id 0 to the leading NA).
main = cbind(statistic =c(NA, mass_statistic)[cl[1, ] + 1],
pvalue = c(NA, pvalue)[cl[1, ] + 1],
cluster_id = cl[1, ])
## Split view: only the "mean" component is populated in this variant; the
## slope columns are intentionally NA/0.
statistic = cbind(mean = c(NA, mass_statistic)[cl[1,]+1],slope = rep(NA,dim(distribution)[2]))
pvalue = cbind(mean = c(NA, pvalue)[cl[1,]+1],slope = rep(NA,dim(distribution)[2]))
cluster_id = cbind(mean = cl[1,], slope = rep(0,dim(distribution)[2]))
main_split = list(statistic = statistic, pvalue = pvalue,
cluster_id = cluster_id)
out = list(main = main, main_split = main_split, distribution = mass_distribution, threshold = threshold)
return(out)
}
# compute_clustermass_slopebinder2 <- function (distribution, sdistribution, threshold, aggr_FUN, alternative = "two.sided"){
# switch(alternative,
# two.sided = {
# distribution <- abs(distribution)
# sdistribution <- abs(sdistribution)
# threshold <- abs(threshold)
# selected <- (distribution > threshold)
# sselected <- (sdistribution > threshold)
# selected_join <- selected|sselected
# extreme = function(x) max(x, na.rm = T)
# })
#
# cl_join = (selected_join-cbind(0,selected_join[,-NCOL(selected_join), drop = F]))==1
# cl_join = t(apply(cl_join,1,cumsum))*selected_join
#
# cl = selected*cl_join
# #scl = sselected*cl_join
# cl_join = t(sapply(1:(dim(selected_join)[1]),function(permi){
# cli = cl[permi,]
# ui = unique(cli)
# ui = ui[ui!=0]
# if(length(ui)==0){
# cli = rep(0,ncol(cl))}else{
# for(ii in 1:length(ui)){
# cli[cli==(ui[[ii]])]=ii
# }}
# cli}
# ))
#
# mass_distribution = sapply(1:(dim(selected_join)[1]),function(permi){
# max(sapply(1:max(1,max(cl_join[permi,])),function(i_in_p){
# aggr_FUN(c(distribution[permi,cl_join[permi,]==i_in_p]))}))})
#
# mass_statistic = sapply(1:max(1,max(cl_join[1,])), function(i) {
# aggr_FUN(c(distribution[1,cl[1,]==i]))
# })
#
# pvalue = sapply(mass_statistic, function(mi) permuco:::compute_pvalue(stat = mi,
# distribution = mass_distribution, alternative = "two.sided"))
#
#
# main = cbind(statistic =c(NA, mass_statistic)[cl_join[1, ] + 1],
# pvalue = c(NA, pvalue)[cl_join[1, ] + 1],
# cluster_id = cl_join[1, ])
#
# statistic = cbind(mean = c(NA, mass_statistic)[cl[1,]+1],slope = c(NA, mass_statistic)[scl[1,]+1])
# pvalue = cbind(mean = c(NA, pvalue)[cl[1,]+1],slope = c(NA, pvalue)[scl[1,]+1])
# cluster_id = cbind(mean = cl[1,], slope = scl[1,])
#
#
#
# main_split = list(statistic = statistic, pvalue = pvalue,
# cluster_id = cluster_id)
#
# out = list(main = main, main_split = main_split, distribution = mass_distribution, threshold = threshold)
# return(out)
# }
|
### Final Project — dispersion trade on the Dow Jones index.
### Short one 30-day ATM index straddle, long k straddles on each of the 29
### component stocks; risk is measured by filtered historical simulation (FHS).
library("tidyr")
library("fOptions")
library("dplyr")
library("fGarch")
library("rmgarch")
## Assumptions:
## - initial date: 06/27/2019
## - one-month LIBOR rate (annualized) on the initial date was 2.40238%
##   (source: FRED economic data); used as the risk-free rate throughout.
r_f = 2.40238/100
## Historical stock price data comes from finance.yahoo.com (pre-downloaded CSVs).
## Tickers: the Dow Jones index itself ("^DJI") plus its 29 component stocks:
ticker <- c("^DJI","AAPL","AXP","BA","CAT","CSCO","CVX","DIS","GS","HD","IBM","INTC","JNJ","JPM",
            "KO","MCD","MMM","MRK","MSFT","NKE","PFE","PG","TRV","UNH","UTX","V","VZ","WBA","WMT","XOM")
## One CSV per ticker: <folder><ticker>.csv
folder <- "C:/Users/z/Desktop/FIN 567/project/stock price and log return data/"
g <- ".csv"
datafiles <- paste0(rep(folder,30),ticker,rep(g,30))
start <- "2017-06-27"
end <- "2019-06-27"
## 503 rows of historical (log) stock returns — two years of daily data.
## NOTE(review): assumes every ticker's CSV yields exactly 503 in-window rows.
stock_return <- matrix(rep(0,503*30),nrow=503,ncol=30)
colnames(stock_return) <- ticker
stock_price_time0 <- rep(0,30)
## Load each ticker's CSV, keep rows with Date in (start, end], and record the
## log-return series (column 3) and the latest (time-0) price (column 2).
for (i in 1:30) {
  data1 <- read.csv(datafiles[i], stringsAsFactors = FALSE)
  data1$Date <- as.Date(data1$Date)
  ## BUG FIX: the original filtered with `data1 > start`, which compares EVERY
  ## column of the data frame against the date string instead of the Date
  ## column; restrict the comparison to data1$Date.
  data2 <- drop_na(data1[which(data1$Date > start & data1$Date <= end), ])
  stock_return[, i] <- data2[, 3]
  stock_price_time0[i] <- data2[nrow(data2), 2]
}
## Dividend yields for every underlying, pre-computed in a separate R script
## (one 21-row CSV per ticker); the last row holds the current annualized yield.
folder1 <- "C:/Users/z/Desktop/FIN 567/project/dividend rate/"
g1 <- "_div_yield.csv"
datafiles1 <- paste0(rep(folder1, 30), ticker, rep(g1, 30))
div <- rep(0, 30)
for (i in 1:30) {
  data3 <- read.csv(datafiles1[i], stringsAsFactors = FALSE)[, 2]
  ## BUG FIX: the original only print()ed "error" and kept going, then indexed
  ## data3[21] anyway (NA on a short series). Fail fast with a clear message.
  if (length(data3) != 21) {
    stop("unexpected number of dividend observations in ", datafiles1[i])
  }
  div[i] <- data3[21]
}
## Cost of carry b = r - q per underlying (GBS convention used by fOptions).
cost_of_carry <- r_f - div
## Determine k (number of component straddles held per index straddle) for the
## two construction rules: k1 = vega-neutral, k2 = theta-neutral.
time0 <- read.csv("C:/Users/z/Desktop/FIN 567/project/calculate k/all the options on 20190627.csv",)
## "DJX" replaces "^DJI": index options are quoted on DJX = DJI / 100.
ticker_new <- data.frame(c("DJX",ticker[-1]))
names(ticker_new) <- "ticker"
## Keep only the 30-day, |delta| = 50 (ATM) call and put quotes.
time0_call <- time0[which(time0$days == 30 & time0$delta ==50),]
time0_put <- time0[which(time0$days == 30 & time0$delta ==-50),]
## Reorder rows to match ticker_new and drop the unneeded columns.
time0_call <- left_join(ticker_new, time0_call, by = "ticker")[,c("ticker","impl_volatility","impl_strike")]
time0_put <- left_join(ticker_new, time0_put, by = "ticker")[,c("ticker","impl_volatility","impl_strike")]
## Per-name Greeks and prices of the time-0 straddle legs, filled by the loop below.
vega_time0_call <- rep(0,30)
vega_time0_put <- rep(0,30)
theta_time0_call <- rep(0,30)
theta_time0_put <- rep(0,30)
delta_time0_call <- rep(0,30)
delta_time0_put <- rep(0,30)
gamma_time0_call <- rep(0,30)
gamma_time0_put <- rep(0,30)
price_time0_call <- rep(0,30)
price_time0_put <- rep(0,30)
tau_time0 <- 21/252  # 21 trading days to expiry, in years
vol_time0_call <- time0_call[,2]  # ATM implied vols per name
vol_time0_put <- time0_put[,2]
X_time0_call <- time0_call[,3]  # ATM strikes per name
X_time0_put <- time0_put[,3]
## Time-0 Greeks and prices for every straddle leg. The index leg (i == 1) is
## priced on the DJX underlying (DJI / 100) as a European option; the 29
## single names are priced as American options with a CRR binomial tree.
## Greeks use the GBS analytics in both cases, exactly as before — the only
## branch-dependent input to GBSGreeks is the underlying price, so the eight
## duplicated calls per branch are collapsed into one shared set.
for (i in 1:30) {
  s_underlying <- if (i == 1) stock_price_time0[i] / 100 else stock_price_time0[i]
  vega_time0_call[i] <- GBSGreeks(Selection = "vega", TypeFlag = "c", S = s_underlying, X = X_time0_call[i],
                                  Time = tau_time0, r = r_f, b = cost_of_carry[i], sigma = vol_time0_call[i])
  vega_time0_put[i] <- GBSGreeks(Selection = "vega", TypeFlag = "p", S = s_underlying, X = X_time0_put[i],
                                 Time = tau_time0, r = r_f, b = cost_of_carry[i], sigma = vol_time0_put[i])
  theta_time0_call[i] <- GBSGreeks(Selection = "theta", TypeFlag = "c", S = s_underlying, X = X_time0_call[i],
                                   Time = tau_time0, r = r_f, b = cost_of_carry[i], sigma = vol_time0_call[i])
  theta_time0_put[i] <- GBSGreeks(Selection = "theta", TypeFlag = "p", S = s_underlying, X = X_time0_put[i],
                                  Time = tau_time0, r = r_f, b = cost_of_carry[i], sigma = vol_time0_put[i])
  delta_time0_call[i] <- GBSGreeks(Selection = "delta", TypeFlag = "c", S = s_underlying, X = X_time0_call[i],
                                   Time = tau_time0, r = r_f, b = cost_of_carry[i], sigma = vol_time0_call[i])
  delta_time0_put[i] <- GBSGreeks(Selection = "delta", TypeFlag = "p", S = s_underlying, X = X_time0_put[i],
                                  Time = tau_time0, r = r_f, b = cost_of_carry[i], sigma = vol_time0_put[i])
  gamma_time0_call[i] <- GBSGreeks(Selection = "gamma", TypeFlag = "c", S = s_underlying, X = X_time0_call[i],
                                   Time = tau_time0, r = r_f, b = cost_of_carry[i], sigma = vol_time0_call[i])
  gamma_time0_put[i] <- GBSGreeks(Selection = "gamma", TypeFlag = "p", S = s_underlying, X = X_time0_put[i],
                                  Time = tau_time0, r = r_f, b = cost_of_carry[i], sigma = vol_time0_put[i])
  if (i == 1) {
    ## European pricing for the (cash-settled) index options.
    price_time0_call[i] <- GBSOption(TypeFlag = "c", S = s_underlying, X = X_time0_call[i], Time = tau_time0,
                                     r = r_f, b = cost_of_carry[i], sigma = vol_time0_call[i])@price
    price_time0_put[i] <- GBSOption(TypeFlag = "p", S = s_underlying, X = X_time0_put[i], Time = tau_time0,
                                    r = r_f, b = cost_of_carry[i], sigma = vol_time0_put[i])@price
  } else {
    ## American pricing for single-name options (one tree step per trading day).
    price_time0_call[i] <- CRRBinomialTreeOption(TypeFlag = "ca", S = s_underlying, X = X_time0_call[i], Time = tau_time0,
                                                 r = r_f, b = cost_of_carry[i], sigma = vol_time0_call[i],
                                                 n = tau_time0 * 252, title = NULL, description = NULL)@price
    price_time0_put[i] <- CRRBinomialTreeOption(TypeFlag = "pa", S = s_underlying, X = X_time0_put[i], Time = tau_time0,
                                                r = r_f, b = cost_of_carry[i], sigma = vol_time0_put[i],
                                                n = tau_time0 * 252, title = NULL, description = NULL)@price
  }
}
## Hedge ratios: k solves (index straddle greek) = k * (sum of the component
## straddle greeks), once for vega and once for theta.
k_vega_neutral <- (vega_time0_call[1] + vega_time0_put[1]) / sum(vega_time0_call[2:30], vega_time0_put[2:30])
k_vega_neutral
k_theta_neutral <- (theta_time0_call[1] + theta_time0_put[1]) / sum(theta_time0_call[2:30], theta_time0_put[2:30])
k_theta_neutral
## With equal weights w_i = 1/29 across the 29 components, selling one index
## straddle and buying k*w_i of each component straddle implies an aggregate
## multiplier of k * 29.
k_vega_actual <- k_vega_neutral * 29
k_vega_actual
k_theta_actual <- k_theta_neutral * 29
k_theta_actual
## Net book exposure for any per-leg quantity (greek or price): short one
## index straddle, long k of each of the 29 component straddles.
net_exposure <- function(leg_call, leg_put, k) {
  -(leg_call[1] + leg_put[1]) + k * sum(leg_call[2:30], leg_put[2:30])
}
## Greek-letter risks of the vega-neutral book.
vega_port_vega <- net_exposure(vega_time0_call, vega_time0_put, k_vega_neutral)
vega_port_vega
theta_port_vega <- net_exposure(theta_time0_call, theta_time0_put, k_vega_neutral)
theta_port_vega
gamma_port_vega <- net_exposure(gamma_time0_call, gamma_time0_put, k_vega_neutral)
gamma_port_vega
delta_port_vega <- net_exposure(delta_time0_call, delta_time0_put, k_vega_neutral)
delta_port_vega
## Greek-letter risks of the theta-neutral book.
vega_port_theta <- net_exposure(vega_time0_call, vega_time0_put, k_theta_neutral)
vega_port_theta
theta_port_theta <- net_exposure(theta_time0_call, theta_time0_put, k_theta_neutral)
theta_port_theta
gamma_port_theta <- net_exposure(gamma_time0_call, gamma_time0_put, k_theta_neutral)
gamma_port_theta
delta_port_theta <- net_exposure(delta_time0_call, delta_time0_put, k_theta_neutral)
delta_port_theta
## Initial (time-0) portfolio values under both constructions.
port_time0_vega_neutral <- net_exposure(price_time0_call, price_time0_put, k_vega_neutral)
port_time0_vega_neutral
port_time0_theta_neutral <- net_exposure(price_time0_call, price_time0_put, k_theta_neutral)
port_time0_theta_neutral
## Build daily log-changes of the 30-day ATM implied vols (call and put legs)
## for all 30 names, plus their time-0 implied-vol levels.
his_vol <- read.csv("C:/Users/z/Desktop/FIN 567/project/historical option data of all the options/historical option data.csv")
his_vol_important <- his_vol[, c(2, 3, 4, 5, 7, 8)]
his_call <- his_vol_important[which(his_vol_important$days == 30 & his_vol_important$delta == 50), ]
his_put <- his_vol_important[which(his_vol_important$days == 30 & his_vol_important$delta == -50), ]
## 503 daily log-changes per name — assumes 504 vol observations per ticker.
vol_call_log <- matrix(0, nrow = 503, ncol = 30)
vol_put_log <- matrix(0, nrow = 503, ncol = 30)
colnames(vol_call_log) <- paste0(ticker_new[, 1], "_vol_call")
colnames(vol_put_log) <- paste0(ticker_new[, 1], "_vol_put")
vol_call_time0 <- rep(0, 30)
vol_put_time0 <- rep(0, 30)
for (i in 1:30) {
  call_one <- his_call[which(his_call$ticker == ticker_new[i, 1]), "impl_volatility"]
  put_one <- his_put[which(his_put$ticker == ticker_new[i, 1]), "impl_volatility"]
  ## most recent observation = time-0 implied vol
  vol_call_time0[i] <- call_one[length(call_one)]
  vol_put_time0[i] <- put_one[length(put_one)]
  ## FIX: the original wrote x[1:length(x)-1], which is x[0:(n-1)] by operator
  ## precedence and only works because index 0 is silently dropped. Use
  ## explicit negative indexing: log of vol_t / vol_{t-1}.
  vol_call_log[, i] <- log(call_one[-1] / call_one[-length(call_one)])
  vol_put_log[, i] <- log(put_one[-1] / put_one[-length(put_one)])
}
colnames(stock_return) <- ticker_new[,1]
## 90 market factors: 30 stock log-returns + 30 call-vol log-changes
## + 30 put-vol log-changes, column-bound in that order.
market_factor <- cbind(stock_return,vol_call_log,vol_put_log) ## the master factor matrix
## Estimate a univariate GARCH model for each of the 90 factor series.
## Univariate GARCH(1,1), zero mean (no ARMA terms), Gaussian innovations.
uspec <- ugarchspec(variance.model = list(model = "sGARCH", garchOrder = c(1,1)),
                    mean.model = list(armaOrder = c(0,0), include.mean = FALSE),
                    distribution.model = "norm")
## We do not keep per-factor parameters except for the 30 stocks; what each
## fit must deliver is the standardized shock series and the last fitted
## sigma, which seed the simulation of sigma_{t+1}.
sigma_tplus1 <- rep(0,90)
daily_shock <- matrix(rep(0, 90*503),nrow=503,ncol=90)
colnames(daily_shock) <- colnames(market_factor)
## GARCH(1,1) parameters of the 30 stock-return fits (one column per stock),
## reused later by the 21-day simulation.
garch_stock_coef <- matrix(rep(0, 30*3), nrow = 3, ncol=30)
colnames(garch_stock_coef) <- ticker_new[,1]
## Fit GARCH(1,1) to each of the 90 factor series; harvest standardized
## shocks, the last fitted sigma, and the one-step-ahead vol forecast.
for (j in 1:90) {
log_return <- market_factor[,j]
fit.marg <- ugarchfit(spec = uspec, data = log_return)
if (j <= 30) {
## assumes coef() returns (omega, alpha1, beta1) in that order for this
## zero-mean sGARCH(1,1) spec — TODO confirm against rugarch docs
garch_stock_coef[,j] <- coef(fit.marg)
}
## standardized residuals z_t = r_t / sigma_t from the fit object
daily_shock[,j] <- fit.marg@fit[["z"]]
## last in-sample conditional sigma and last observed return
sigma_time0 <- fit.marg@fit[["sigma"]][length(fit.marg@fit[["sigma"]])]
log_return_time0 <- log_return[length(log_return)]
## GARCH(1,1) one-step forecast:
## sigma^2_{t+1} = omega + alpha1 * r_t^2 + beta1 * sigma_t^2
sigma_tplus1[j] <- sqrt(sum(coef(fit.marg)*c(1,log_return_time0^2,sigma_time0^2)))
}
## Filtered Historical Simulation, one-day horizon, constant correlation:
## each of the FH scenarios draws one historical day of standardized shocks
## (the same row for every factor, preserving cross-sectional dependence)
## and scales it by the GARCH vol forecast sigma_{t+1}.
##
## FH             number of simulated scenarios (columns of the result).
## shock_database days x factors matrix of standardized residuals z.
## sigma_tplus1   length-ncol(shock_database) vector of forecast vols.
## returns        factors x FH matrix of simulated one-day log-returns.
## NOTE: resets the global RNG state via set.seed(123) for reproducibility.
multivariate_FHS_1day <- function(FH, shock_database, sigma_tplus1) {
  set.seed(123)
  sim_ret_tplus1 <- matrix(0, nrow = ncol(shock_database), ncol = FH)
  colnames(sim_ret_tplus1) <- seq_len(FH)  # seq_len handles FH == 0 cleanly
  for (k in seq_len(FH)) {
    ## draw one historical day; sample(n, 1) keeps the original RNG stream
    m <- sample(nrow(shock_database), 1, replace = TRUE)
    sim_ret_tplus1[, k] <- shock_database[m, ] * sigma_tplus1
  }
  return(sim_ret_tplus1)
}
## Simulate 10,000 one-day scenarios across all 90 factors.
sim_ret_tplus1_1d <- multivariate_FHS_1day(FH = 10000, shock_database = daily_shock, sigma_tplus1 = sigma_tplus1)
## Collect the market-factor levels at time 0 (columns: price, call vol, put vol).
variable_time0 <- cbind(stock_price_time0, vol_call_time0, vol_put_time0)
port_value_sim1_vega <- rep(0,10000)
port_value_sim1_theta <- rep(0,10000)
tau_sim1 <- 20/252 ## one day passed; 20 trading days remain to expiry
## Revalue both books in each one-day scenario: apply the simulated log-changes
## to prices (cols 1-30), call vols (31-60) and put vols (61-90), then reprice
## every leg with one day less to expiry — European GBS for the DJX legs,
## American CRR binomial for the single names — and record the two book values.
for (l in 1:10000) {
stock_sim1 <- variable_time0[,1] * exp(sim_ret_tplus1_1d[,l][1:30])
vol_call_sim1 <- variable_time0[,2] * exp(sim_ret_tplus1_1d[,l][31:60])
vol_put_sim1 <- variable_time0[,3] * exp(sim_ret_tplus1_1d[,l][61:90])
price_call_sim1 <- rep(0,30)
price_put_sim1 <- rep(0,30)
for (i in 1:30) {
if(i==1){
## DJX underlying is the simulated index level divided by 100
s_sim1_DJX <- stock_sim1[1]/100
price_call_sim1[i] <- GBSOption(TypeFlag = "c", S = s_sim1_DJX , X = X_time0_call[i], Time = tau_sim1, r = r_f,
b = cost_of_carry[i], sigma = vol_call_sim1[i])@price
price_put_sim1[i] <- GBSOption(TypeFlag = "p", S = s_sim1_DJX , X = X_time0_put[i], Time = tau_sim1, r = r_f,
b = cost_of_carry[i], sigma = vol_put_sim1[i])@price
}else{
price_call_sim1[i] <- CRRBinomialTreeOption(TypeFlag = "ca", S=stock_sim1[i], X=X_time0_call[i], Time = tau_sim1,
r=r_f, b=cost_of_carry[i], sigma=vol_call_sim1[i], n=tau_sim1*252, title = NULL, description = NULL)@price
price_put_sim1[i] <- CRRBinomialTreeOption(TypeFlag = "pa", S=stock_sim1[i], X=X_time0_put[i], Time = tau_sim1,
r=r_f, b=cost_of_carry[i], sigma=vol_put_sim1[i], n=tau_sim1*252, title = NULL, description = NULL)@price
}
}
## short the index straddle, long k component straddles (same as at time 0)
port_value_sim1_vega[l] <- -(price_call_sim1[1]+price_put_sim1[1])+k_vega_neutral*sum(price_call_sim1[2:30],price_put_sim1[2:30])
port_value_sim1_theta[l] <- -(price_call_sim1[1]+price_put_sim1[1])+k_theta_neutral*sum(price_call_sim1[2:30],price_put_sim1[2:30])
}
## One-day P/L distributions and risk measures for both portfolios.
PL_vega_sim1 <- port_value_sim1_vega - port_time0_vega_neutral
PL_theta_sim1 <- port_value_sim1_theta - port_time0_theta_neutral
## VaR is minus the lower-tail quantile of P/L. FIX: the original used
## abs(quantile(...)), which coincides with -quantile when the quantile is
## negative but silently selects the WRONG tail in the ES filter below
## whenever the quantile happens to be positive.
VaR_vega_1perc <- -quantile(PL_vega_sim1, 0.01)
VaR_vega_1perc
VaR_vega_5perc <- -quantile(PL_vega_sim1, 0.05)
VaR_vega_5perc
VaR_theta_1perc <- -quantile(PL_theta_sim1, 0.01)
VaR_theta_1perc
VaR_theta_5perc <- -quantile(PL_theta_sim1, 0.05)
VaR_theta_5perc
hist(PL_vega_sim1, main = "P/L Distribution of the vega-neutral portfolio(1-day horizon)", xlab = "P/L", breaks = 100, col = "green",
     border = "blue")
hist(PL_theta_sim1, main = "P/L Distribution of the theta-neutral portfolio(1-day horizon)", xlab = "P/L", breaks = 100, col = "green",
     border = "blue")
## Expected shortfall: average loss conditional on exceeding the VaR threshold.
ES_1perc_vega_1 <- -mean(PL_vega_sim1[PL_vega_sim1 <= (-VaR_vega_1perc)])
ES_1perc_vega_1
ES_5perc_vega_1 <- -mean(PL_vega_sim1[PL_vega_sim1 <= (-VaR_vega_5perc)])
ES_5perc_vega_1
ES_1perc_theta_1 <- -mean(PL_theta_sim1[PL_theta_sim1 <= (-VaR_theta_1perc)])
ES_1perc_theta_1
ES_5perc_theta_1 <- -mean(PL_theta_sim1[PL_theta_sim1 <= (-VaR_theta_5perc)])
ES_5perc_theta_1
## One-month (21-trading-day) VaR: at that horizon every option has expired
## simultaneously, so only the 30 stock-return factors need to be simulated
## and the portfolios can be revalued from intrinsic option payoffs.
sigma_initial <- sigma_tplus1[1:30]  # day-1 vol forecasts for the 30 stocks
garch_stock_coef  # print the fitted stock GARCH parameters for inspection
daily_shock_stock <- daily_shock[,1:30]  # standardized stock-return shocks only
## Filtered Historical Simulation over a 21-day horizon with daily GARCH(1,1)
## volatility updating: each day every path draws one historical row of
## standardized shocks (preserving cross-sectional dependence), scales it by
## that path's current sigma, and rolls sigma forward via
## sigma^2_new = omega + alpha1 * r^2 + beta1 * sigma^2_old.
##
## FH             number of simulated paths.
## shock_database days x factors matrix of standardized residuals.
## sigma_initial  length-ncol(shock_database) vector of day-1 vols.
## garch_coef     3 x factors matrix of (omega, alpha1, beta1) per factor.
## returns        factors x FH matrix of cumulative 21-day log-returns.
## NOTE: resets the global RNG state via set.seed(123) for reproducibility.
multivariate_FHS_1month <- function(FH, shock_database, sigma_initial, garch_coef){
  set.seed(123)
  horizon <- 21
  n_fac <- ncol(shock_database)
  sim_ret_cul <- matrix(0, nrow = n_fac, ncol = FH)
  sim_ret_new <- matrix(0, nrow = n_fac, ncol = FH)
  sim_std_new <- matrix(0, nrow = n_fac, ncol = FH)
  for (day in 1:horizon) {
    for (i in seq_len(FH)) {
      m <- sample(nrow(shock_database), 1, replace = TRUE)
      z_sim <- shock_database[m, ]
      ## BUG FIX: on day 1 the original updated `sigma_initial` in place while
      ## looping over paths, so every path i > 1 started from path (i-1)'s
      ## updated sigma instead of the common initial forecast. Each path now
      ## starts day 1 from the same sigma_initial; afterwards it uses its own
      ## stored sigma from the previous day.
      sigma_prev <- if (day == 1) sigma_initial else sim_std_new[, i]
      ret_sim <- z_sim * sigma_prev
      sim_ret_new[, i] <- ret_sim
      ## vectorized GARCH(1,1) variance update over all factors at once
      sim_std_new[, i] <- sqrt(garch_coef[1, ] + garch_coef[2, ] * ret_sim^2 +
                                 garch_coef[3, ] * sigma_prev^2)
    }
    ## accumulate daily log-returns into the horizon return
    sim_ret_cul <- sim_ret_cul + sim_ret_new
  }
  return(sim_ret_cul)
}
## 21-day horizon: all options expire, so each portfolio value is the sum of
## intrinsic payoffs at the simulated terminal prices.
FHS_ret_21_sim <- multivariate_FHS_1month(FH = 10000, shock_database = daily_shock_stock, sigma_initial = sigma_initial, garch_coef = garch_stock_coef)
port_value_sim21_vega <- rep(0, 10000)
port_value_sim21_theta <- rep(0, 10000)
for (i in 1:10000) {
  ## simulated terminal prices; the DJX underlying is the index level / 100
  stock_expiry <- exp(FHS_ret_21_sim[, i]) * stock_price_time0
  stock_expiry[1] <- stock_expiry[1] / 100
  ## vectorized intrinsic payoffs: max(S - X, 0) and max(X - S, 0) per name
  price_call_sim21 <- pmax(stock_expiry - X_time0_call, 0)
  price_put_sim21 <- pmax(X_time0_put - stock_expiry, 0)
  port_value_sim21_vega[i] <- -(price_call_sim21[1] + price_put_sim21[1]) + k_vega_neutral * sum(price_call_sim21[2:30], price_put_sim21[2:30])
  port_value_sim21_theta[i] <- -(price_call_sim21[1] + price_put_sim21[1]) + k_theta_neutral * sum(price_call_sim21[2:30], price_put_sim21[2:30])
}
## 21-day P/L distributions and risk measures for both portfolios.
PL_vega_sim21 <- port_value_sim21_vega - port_time0_vega_neutral
PL_theta_sim21 <- port_value_sim21_theta - port_time0_theta_neutral
## VaR is minus the lower-tail quantile of P/L. FIX: the original
## abs(quantile(...)) gives the same number when the quantile is negative but
## breaks the ES tail filter below whenever the quantile is positive.
VaR_vega_1perc_21 <- -quantile(PL_vega_sim21, 0.01)
VaR_vega_1perc_21
VaR_vega_5perc_21 <- -quantile(PL_vega_sim21, 0.05)
VaR_vega_5perc_21
VaR_theta_1perc_21 <- -quantile(PL_theta_sim21, 0.01)
VaR_theta_1perc_21
VaR_theta_5perc_21 <- -quantile(PL_theta_sim21, 0.05)
VaR_theta_5perc_21
hist(PL_vega_sim21, main = "P/L Distribution of the vega-neutral portfolio(1-month horizon)", xlab = "P/L", breaks = 1000, col = "green",
     border = "blue", xlim = c(-25, 25))
hist(PL_theta_sim21, main = "P/L Distribution of the theta-neutral portfolio(1-month horizon)", xlab = "P/L", breaks = 1000, col = "green",
     border = "blue", xlim = c(-20, 20))
## Expected shortfall: average loss conditional on exceeding the VaR threshold.
ES_1perc_vega_21 <- -mean(PL_vega_sim21[PL_vega_sim21 <= (-VaR_vega_1perc_21)])
ES_1perc_vega_21
ES_5perc_vega_21 <- -mean(PL_vega_sim21[PL_vega_sim21 <= (-VaR_vega_5perc_21)])
ES_5perc_vega_21
ES_1perc_theta_21 <- -mean(PL_theta_sim21[PL_theta_sim21 <= (-VaR_theta_1perc_21)])
ES_1perc_theta_21
ES_5perc_theta_21 <- -mean(PL_theta_sim21[PL_theta_sim21 <= (-VaR_theta_5perc_21)])
ES_5perc_theta_21
| /final project R code_Ruixin Zhang.R | no_license | RuixinZhangMarty/FIN-514-Final-Project--dispersion-trade | R | false | false | 21,275 | r | ###Final Project
library("tidyr")
library("fOptions")
library("dplyr")
library("fGarch")
library("rmgarch")
##assupmtion:
##initial date: 06/27/2019
##one-month libor rate(annualized) on initial date was 2.40238%(Sources: Fred Economics Data)
r_f = 2.40238/100
##the sources of historical stock price data come from yahoofinance.com. I hope it works
##Here are the ticker list of the Dow Jones Index and the other 29 component stocks in the Dow Jones index:
ticker <- c("^DJI","AAPL","AXP","BA","CAT","CSCO","CVX","DIS","GS","HD","IBM","INTC","JNJ","JPM",
"KO","MCD","MMM","MRK","MSFT","NKE","PFE","PG","TRV","UNH","UTX","V","VZ","WBA","WMT","XOM")
folder <- "C:/Users/z/Desktop/FIN 567/project/stock price and log return data/"
g <- ".csv"
datafiles <- paste0(rep(folder,30),ticker,rep(g,30))
start <- "2017-06-27"
end <- "2019-06-27"
##There are 503 rows of historical data for (log) stock returns(two years of historical stock data)
stock_return <- matrix(rep(0,503*30),nrow=503,ncol=30)
colnames(stock_return) <- ticker
stock_price_time0 <- rep(0,30)
for (i in 1:30) {
data1 <- read.csv(datafiles[i],stringsAsFactors = FALSE)
data1$Date <- as.Date(data1$Date)
data2 <- drop_na(data1[which(data1>start & data1<=end),])
stock_return[,i] <- data2[,3]
stock_price_time0[i] <- data2[nrow(data2),2]
}
## now we need to handle some dividend rate for all the stocks inside our portfolio:
## you can see we have already done some preliminary calculation for dividend yieldsin another R script,
## which saves us a lot of energy and time
folder1 <- "C:/Users/z/Desktop/FIN 567/project/dividend rate/"
g1 <- "_div_yield.csv"
datafiles1 <- paste0(rep(folder1,30),ticker,rep(g1,30))
div <- rep(0,30)
for (i in 1:30) {
data3 <- read.csv(datafiles1[i],stringsAsFactors = FALSE)[,2]
if (length(data3)!=21){
print("error")
}
div[i] <- data3[21]
}
cost_of_carry <- r_f-div
##now we want to calculate the K here to determine the composition of the two portfolios for both methods(k1: vega_neutral, k2: theta_neutral)
time0 <- read.csv("C:/Users/z/Desktop/FIN 567/project/calculate k/all the options on 20190627.csv",)
ticker_new <- data.frame(c("DJX",ticker[-1]))
names(ticker_new) <- "ticker"
time0_call <- time0[which(time0$days == 30 & time0$delta ==50),]
time0_put <- time0[which(time0$days == 30 & time0$delta ==-50),]
##just changing the order of the historical data by the order of ticker_new and remove some unnecessary columns
time0_call <- left_join(ticker_new, time0_call, by = "ticker")[,c("ticker","impl_volatility","impl_strike")]
time0_put <- left_join(ticker_new, time0_put, by = "ticker")[,c("ticker","impl_volatility","impl_strike")]
vega_time0_call <- rep(0,30)
vega_time0_put <- rep(0,30)
theta_time0_call <- rep(0,30)
theta_time0_put <- rep(0,30)
delta_time0_call <- rep(0,30)
delta_time0_put <- rep(0,30)
gamma_time0_call <- rep(0,30)
gamma_time0_put <- rep(0,30)
price_time0_call <- rep(0,30)
price_time0_put <- rep(0,30)
tau_time0 <- 21/252
vol_time0_call <- time0_call[,2]
vol_time0_put <- time0_put[,2]
X_time0_call <- time0_call[,3]
X_time0_put <- time0_put[,3]
for (i in 1:30) {
if(i==1){
s_time0 <- stock_price_time0[i]/100
vega_time0_call[i] <- GBSGreeks(Selection = "vega", TypeFlag = "c", S = s_time0, X = X_time0_call[i], Time = tau_time0, r = r_f,
b = cost_of_carry[i], sigma = vol_time0_call[i])
vega_time0_put[i] <- GBSGreeks(Selection = "vega", TypeFlag = "p", S = s_time0, X = X_time0_put[i], Time = tau_time0, r = r_f,
b = cost_of_carry[i], sigma = vol_time0_put[i])
theta_time0_call[i] <- GBSGreeks(Selection = "theta", TypeFlag = "c", S = s_time0, X = X_time0_call[i], Time = tau_time0, r = r_f,
b = cost_of_carry[i], sigma = vol_time0_call[i])
theta_time0_put[i] <- GBSGreeks(Selection = "theta", TypeFlag = "p", S = s_time0, X = X_time0_put[i], Time = tau_time0, r = r_f,
b = cost_of_carry[i], sigma = vol_time0_put[i])
delta_time0_call[i] <- GBSGreeks(Selection = "delta", TypeFlag = "c", S = s_time0, X = X_time0_call[i], Time = tau_time0, r = r_f,
b = cost_of_carry[i], sigma = vol_time0_call[i])
delta_time0_put[i] <- GBSGreeks(Selection = "delta", TypeFlag = "p", S = s_time0, X = X_time0_put[i], Time = tau_time0, r = r_f,
b = cost_of_carry[i], sigma = vol_time0_put[i])
gamma_time0_call[i] <- GBSGreeks(Selection = "gamma", TypeFlag = "c", S = s_time0, X = X_time0_call[i], Time = tau_time0, r = r_f,
b = cost_of_carry[i], sigma = vol_time0_call[i])
gamma_time0_put[i] <- GBSGreeks(Selection = "gamma", TypeFlag = "p", S = s_time0, X = X_time0_put[i], Time = tau_time0, r = r_f,
b = cost_of_carry[i], sigma = vol_time0_put[i])
price_time0_call[i] <- GBSOption(TypeFlag = "c", S = s_time0, X = X_time0_call[i], Time = tau_time0, r = r_f,
b = cost_of_carry[i], sigma = vol_time0_call[i])@price
price_time0_put[i] <- GBSOption(TypeFlag = "p", S = s_time0, X = X_time0_put[i], Time = tau_time0, r = r_f,
b = cost_of_carry[i], sigma = vol_time0_put[i])@price
}else{
vega_time0_call[i] <- GBSGreeks(Selection = "vega", TypeFlag = "c", S = stock_price_time0[i], X = X_time0_call[i], Time = tau_time0, r = r_f,
b = cost_of_carry[i], sigma = vol_time0_call[i])
vega_time0_put[i] <- GBSGreeks(Selection = "vega", TypeFlag = "p", S = stock_price_time0[i], X = X_time0_put[i], Time = tau_time0, r = r_f,
b = cost_of_carry[i], sigma = vol_time0_put[i])
theta_time0_call[i] <- GBSGreeks(Selection = "theta", TypeFlag = "c", S = stock_price_time0[i], X = X_time0_call[i], Time = tau_time0, r = r_f,
b = cost_of_carry[i], sigma = vol_time0_call[i])
theta_time0_put[i] <- GBSGreeks(Selection = "theta", TypeFlag = "p", S = stock_price_time0[i], X = X_time0_put[i], Time = tau_time0, r = r_f,
b = cost_of_carry[i], sigma = vol_time0_put[i])
delta_time0_call[i] <- GBSGreeks(Selection = "delta", TypeFlag = "c", S = stock_price_time0[i], X = X_time0_call[i], Time = tau_time0, r = r_f,
b = cost_of_carry[i], sigma = vol_time0_call[i])
delta_time0_put[i] <- GBSGreeks(Selection = "delta", TypeFlag = "p", S = stock_price_time0[i], X = X_time0_put[i], Time = tau_time0, r = r_f,
b = cost_of_carry[i], sigma = vol_time0_put[i])
gamma_time0_call[i] <- GBSGreeks(Selection = "gamma", TypeFlag = "c", S = stock_price_time0[i], X = X_time0_call[i], Time = tau_time0, r = r_f,
b = cost_of_carry[i], sigma = vol_time0_call[i])
gamma_time0_put[i] <- GBSGreeks(Selection = "gamma", TypeFlag = "p", S = stock_price_time0[i], X = X_time0_put[i], Time = tau_time0, r = r_f,
b = cost_of_carry[i], sigma = vol_time0_put[i])
price_time0_call[i] <- CRRBinomialTreeOption(TypeFlag = "ca", S=stock_price_time0[i], X=X_time0_call[i], Time = tau_time0,
r=r_f, b=cost_of_carry[i], sigma=vol_time0_call[i], n=tau_time0*252, title = NULL, description = NULL)@price
price_time0_put[i] <- CRRBinomialTreeOption(TypeFlag = "pa", S=stock_price_time0[i], X=X_time0_put[i], Time = tau_time0,
r=r_f, b=cost_of_carry[i], sigma=vol_time0_put[i], n=tau_time0*252, title = NULL, description = NULL)@price
}
}
k_vega_neutral <- (vega_time0_call[1]+vega_time0_put[1])/sum(vega_time0_call[2:30],vega_time0_put[2:30])
k_vega_neutral
k_theta_neutral <- (theta_time0_call[1]+theta_time0_put[1])/sum(theta_time0_call[2:30],theta_time0_put[2:30])
k_theta_neutral
##if we assume we sell one index straddle and buy kwi components stocks straddles(wi is equal among all
## the component stocks, which is 1/29), we generate the value of new k here for two formulas
k_vega_actual <- k_vega_neutral * 29
k_vega_actual
k_theta_actual <- k_theta_neutral * 29
k_theta_actual
## calculating greek letter risks of both portfolios constructed on time 0 after getting the value of k
## vega_neutral
vega_port_vega <- -(vega_time0_call[1] + vega_time0_put[1])+ k_vega_neutral * sum(vega_time0_call[2:30],vega_time0_put[2:30])
vega_port_vega
theta_port_vega <- -(theta_time0_call[1] + theta_time0_put[1])+ k_vega_neutral * sum(theta_time0_call[2:30],theta_time0_put[2:30])
theta_port_vega
gamma_port_vega <- -(gamma_time0_call[1] + gamma_time0_put[1])+ k_vega_neutral * sum(gamma_time0_call[2:30],gamma_time0_put[2:30])
gamma_port_vega
delta_port_vega <- -(delta_time0_call[1] + delta_time0_put[1])+ k_vega_neutral * sum(delta_time0_call[2:30],delta_time0_put[2:30])
delta_port_vega
## theta_neutral(Greek Letter Risks)
vega_port_theta <- -(vega_time0_call[1] + vega_time0_put[1])+ k_theta_neutral * sum(vega_time0_call[2:30],vega_time0_put[2:30])
vega_port_theta
theta_port_theta <- -(theta_time0_call[1] + theta_time0_put[1])+ k_theta_neutral * sum(theta_time0_call[2:30],theta_time0_put[2:30])
theta_port_theta
gamma_port_theta <- -(gamma_time0_call[1] + gamma_time0_put[1])+ k_theta_neutral * sum(gamma_time0_call[2:30],gamma_time0_put[2:30])
gamma_port_theta
delta_port_theta <- -(delta_time0_call[1] + delta_time0_put[1])+ k_theta_neutral * sum(delta_time0_call[2:30],delta_time0_put[2:30])
delta_port_theta
##calculate the initial portfolio value at time 0 for both methods:
port_time0_vega_neutral <- -(price_time0_call[1]+price_time0_put[1])+k_vega_neutral*sum(price_time0_call[2:30],price_time0_put[2:30])
port_time0_vega_neutral
port_time0_theta_neutral <- -(price_time0_call[1]+price_time0_put[1])+k_theta_neutral*sum(price_time0_call[2:30],price_time0_put[2:30])
port_time0_theta_neutral
##Next we do some data pre-processing of the historical implied volatilities of all the component options inside the Dow Jones index
his_vol <- read.csv("C:/Users/z/Desktop/FIN 567/project/historical option data of all the options/historical option data.csv",)
his_vol_important <- his_vol[,c(2,3,4,5,7,8)]
his_call <- his_vol_important[which(his_vol_important$days == 30 & his_vol_important$delta ==50),]
his_put <- his_vol_important[which(his_vol_important$days == 30 & his_vol_important$delta ==-50),]
vol_call_log <- matrix(rep(0,30*503),nrow = 503, ncol = 30)
vol_put_log <- matrix(rep(0,30*503),nrow = 503, ncol = 30)
colnames(vol_call_log) <- paste0(ticker_new[,1],rep("_vol_call",30))
colnames(vol_put_log) <- paste0(ticker_new[,1],rep("_vol_put",30))
vol_call_time0 <- rep(0,30)
vol_put_time0 <- rep(0,30)
for (i in 1:30) {
call_one <- his_call[which(his_call$ticker==ticker_new[i,1]),"impl_volatility"]
put_one <- his_put[which(his_put$ticker==ticker_new[i,1]),"impl_volatility"]
vol_call_time0[i] <- call_one[length(call_one)]
vol_put_time0[i] <- put_one[length(put_one)]
vol_call_log[,i]=log(call_one[2:length(call_one)]/call_one[1:length(call_one)-1])
vol_put_log[,i]=log(put_one[2:length(put_one)]/put_one[1:length(put_one)-1])
}
colnames(stock_return) <- ticker_new[,1]
market_factor <- cbind(stock_return,vol_call_log,vol_put_log) ##important very important
## As an initial step (not required), estimate a univariate GARCH model for each return process(there are 93 factors)
## Specify univariate GARCH(1,1) model and set mean return = 0
uspec <- ugarchspec(variance.model = list(model = "sGARCH", garchOrder = c(1,1)),
mean.model = list(armaOrder = c(0,0), include.mean = FALSE),
distribution.model = "norm")
## Check the univariate specification for the 90 series, actually we don't really care about the
## parameters for each garch model, what we really want is the daily shock and the estimated sigma
## on last day for each market factor to simulate sigma
sigma_tplus1 <- rep(0,90)
daily_shock <- matrix(rep(0, 90*503),nrow=503,ncol=90)
colnames(daily_shock) <- colnames(market_factor)
## we only record the parameters of the garch(1,1) model for the log_return of stocks for future use
garch_stock_coef <- matrix(rep(0, 30*3), nrow = 3, ncol=30)
colnames(garch_stock_coef) <- ticker_new[,1]
for (j in 1:90) {
log_return <- market_factor[,j]
fit.marg <- ugarchfit(spec = uspec, data = log_return)
if (j <= 30) {
garch_stock_coef[,j] <- coef(fit.marg)
}
daily_shock[,j] <- fit.marg@fit[["z"]]
sigma_time0 <- fit.marg@fit[["sigma"]][length(fit.marg@fit[["sigma"]])]
log_return_time0 <- log_return[length(log_return)]
## use the garch model to estimate the estimated variance for each market factor on time 1
sigma_tplus1[j] <- sqrt(sum(coef(fit.marg)*c(1,log_return_time0^2,sigma_time0^2)))
}
##Let's right now create a function of FHS process assuming constant correlations in order to simulate
## the log_return of all the market factors on time 1
## Simulate 1-day-ahead log-returns for all market factors via filtered
## historical simulation (FHS): each scenario resamples one whole historical
## day of standardized GARCH shocks (preserving the empirical cross-sectional
## dependence between factors) and rescales it by each factor's one-day-ahead
## volatility forecast.
##
## Args:
##   FH             number of simulated scenarios ("filtered histories").
##   shock_database matrix of historical standardized shocks
##                  (rows = days, columns = market factors).
##   sigma_tplus1   numeric vector of one-day-ahead volatility forecasts,
##                  one per factor, same order as the columns of shock_database.
## Returns:
##   matrix with one row per factor and one column per scenario containing
##   the simulated 1-day log-returns.
multivariate_FHS_1day <- function(FH, shock_database, sigma_tplus1){
  set.seed(123)  # fixed seed so the simulation is reproducible
  ## Draw all FH historical day indices in a single vectorized call (this
  ## produces the same RNG stream as FH scalar draws, but avoids the loop);
  ## was: per-iteration sample(1:nrow(...), 1, replace = T).
  day_idx <- sample(seq_len(nrow(shock_database)), FH, replace = TRUE)
  ## t(...) gives a (factors x FH) matrix; elementwise multiplication
  ## recycles sigma_tplus1 down each column, scaling every scenario's shock
  ## vector by the per-factor volatility forecast.
  sim_ret_tplus1 <- t(shock_database[day_idx, , drop = FALSE]) * sigma_tplus1
  colnames(sim_ret_tplus1) <- seq_len(FH)
  return(sim_ret_tplus1)
}
## Draw 10000 one-day scenarios for all 90 factors (stocks + implied vols).
sim_ret_tplus1_1d <- multivariate_FHS_1day(FH = 10000, shock_database = daily_shock, sigma_tplus1 = sigma_tplus1)
## Collect the time-0 level of every market factor (columns: stock price,
## call implied vol, put implied vol; one row per underlying).
variable_time0 <- cbind(stock_price_time0, vol_call_time0, vol_put_time0)
port_value_sim1_vega <- rep(0,10000)
port_value_sim1_theta <- rep(0,10000)
tau_sim1 <- 20/252 ## time to expiry remaining after the 1-day horizon (21 - 1 = 20 trading days, in years)
## Reprice the whole option book under each simulated scenario.
for (l in 1:10000) {
## Map simulated log-returns back to levels: level_1 = level_0 * exp(r).
stock_sim1 <- variable_time0[,1] * exp(sim_ret_tplus1_1d[,l][1:30])
vol_call_sim1 <- variable_time0[,2] * exp(sim_ret_tplus1_1d[,l][31:60])
vol_put_sim1 <- variable_time0[,3] * exp(sim_ret_tplus1_1d[,l][61:90])
price_call_sim1 <- rep(0,30)
price_put_sim1 <- rep(0,30)
for (i in 1:30) {
if(i==1){
## Factor 1 is the DJX index: European options priced by Black-Scholes
## on the index level divided by 100 (quotation convention).
s_sim1_DJX <- stock_sim1[1]/100
price_call_sim1[i] <- GBSOption(TypeFlag = "c", S = s_sim1_DJX , X = X_time0_call[i], Time = tau_sim1, r = r_f,
b = cost_of_carry[i], sigma = vol_call_sim1[i])@price
price_put_sim1[i] <- GBSOption(TypeFlag = "p", S = s_sim1_DJX , X = X_time0_put[i], Time = tau_sim1, r = r_f,
b = cost_of_carry[i], sigma = vol_put_sim1[i])@price
}else{
## Remaining factors are single stocks: American options priced with a
## CRR binomial tree (one step per remaining trading day).
price_call_sim1[i] <- CRRBinomialTreeOption(TypeFlag = "ca", S=stock_sim1[i], X=X_time0_call[i], Time = tau_sim1,
r=r_f, b=cost_of_carry[i], sigma=vol_call_sim1[i], n=tau_sim1*252, title = NULL, description = NULL)@price
price_put_sim1[i] <- CRRBinomialTreeOption(TypeFlag = "pa", S=stock_sim1[i], X=X_time0_put[i], Time = tau_sim1,
r=r_f, b=cost_of_carry[i], sigma=vol_put_sim1[i], n=tau_sim1*252, title = NULL, description = NULL)@price
}
}
## Portfolio value = minus the factor-1 straddle plus k times the sum of all
## other calls and puts (k chosen earlier for vega- / theta-neutrality).
port_value_sim1_vega[l] <- -(price_call_sim1[1]+price_put_sim1[1])+k_vega_neutral*sum(price_call_sim1[2:30],price_put_sim1[2:30])
port_value_sim1_theta[l] <- -(price_call_sim1[1]+price_put_sim1[1])+k_theta_neutral*sum(price_call_sim1[2:30],price_put_sim1[2:30])
}
## Simulated 1-day P/L distributions relative to today's portfolio values.
PL_vega_sim1 <- port_value_sim1_vega-port_time0_vega_neutral
PL_theta_sim1 <- port_value_sim1_theta-port_time0_theta_neutral
## Value-at-Risk: absolute value of the 1% / 5% P/L quantiles.
VaR_vega_1perc <- abs(quantile(PL_vega_sim1, 0.01))
VaR_vega_1perc
VaR_vega_5perc <- abs(quantile(PL_vega_sim1, 0.05))
VaR_vega_5perc
VaR_theta_1perc <- abs(quantile(PL_theta_sim1, 0.01))
VaR_theta_1perc
VaR_theta_5perc <- abs(quantile(PL_theta_sim1, 0.05))
VaR_theta_5perc
hist(PL_vega_sim1, main = "P/L Distribution of the vega-neutral portfolio(1-day horizon)", xlab = "P/L",breaks=100,col = "green",
border = "blue")
hist(PL_theta_sim1, main = "P/L Distribution of the theta-neutral portfolio(1-day horizon)", xlab = "P/L", breaks=100,col = "green",
border = "blue")
## Expected shortfall: average loss conditional on exceeding the VaR level.
ES_1perc_vega_1 = -mean(PL_vega_sim1[PL_vega_sim1<=(-VaR_vega_1perc)])
ES_1perc_vega_1
ES_5perc_vega_1 = -mean(PL_vega_sim1[PL_vega_sim1<=(-VaR_vega_5perc)])
ES_5perc_vega_1
ES_1perc_theta_1 = -mean(PL_theta_sim1[PL_theta_sim1<=(-VaR_theta_1perc)])
ES_1perc_theta_1
ES_5perc_theta_1 = -mean(PL_theta_sim1[PL_theta_sim1<=(-VaR_theta_5perc)])
ES_5perc_theta_1
## VaR of both portfolios over a one-month horizon (21 trading days).
## After 21 trading days all options expire simultaneously, so only the
## 30 stock-price factors need to be simulated; terminal option values are
## simply their payoffs, from which the 21-day portfolio P/L follows.
sigma_initial <- sigma_tplus1[1:30]     ## starting volatilities of the stock factors
garch_stock_coef                        ## printed for inspection only
daily_shock_stock <- daily_shock[,1:30] ## historical standardized shocks of the stocks
## Simulate cumulative 21-day log-returns for the stock factors with filtered
## historical simulation, iterating a GARCH(1,1) volatility recursion per
## path: each day, one whole historical row of standardized shocks is
## resampled (preserving cross-sectional dependence) and scaled by that
## path's current conditional volatility.
##
## Bug fix vs. the original implementation: on day 1 the shared
## `sigma_initial` vector was mutated inside the path loop, so path i+1's
## first-day volatility was path i's *updated* sigma instead of the common
## initial forecast. Each path now carries its own volatility state, all
## initialized from `sigma_initial`.
##
## Args:
##   FH             number of simulated paths.
##   shock_database matrix of historical standardized shocks
##                  (rows = days, columns = factors).
##   sigma_initial  numeric vector of starting volatilities, one per factor.
##   garch_coef     3 x n_factor matrix of GARCH(1,1) coefficients per factor,
##                  rows = (omega, alpha1, beta1).
## Returns:
##   (n_factor x FH) matrix of cumulative 21-day simulated log-returns.
multivariate_FHS_1month <- function(FH, shock_database, sigma_initial, garch_coef){
  set.seed(123)  # fixed seed for reproducibility
  horizon <- 21
  n_factor <- ncol(shock_database)
  sim_ret_cul <- matrix(0, nrow = n_factor, ncol = FH)
  ## Per-path conditional volatility; every column (path) starts from the
  ## same initial one-day-ahead forecast.
  sim_std <- matrix(sigma_initial, nrow = n_factor, ncol = FH)
  for (day in seq_len(horizon)) {
    for (i in seq_len(FH)) {
      ## Resample one historical day of standardized shocks for all factors.
      m <- sample(seq_len(nrow(shock_database)), 1, replace = TRUE)
      z_sim <- shock_database[m, ]
      ret_sim <- z_sim * sim_std[, i]
      sim_ret_cul[, i] <- sim_ret_cul[, i] + ret_sim
      ## GARCH(1,1) recursion, vectorized over factors:
      ## sigma^2_{t+1} = omega + alpha * r_t^2 + beta * sigma_t^2
      sim_std[, i] <- sqrt(garch_coef[1, ] + garch_coef[2, ] * ret_sim^2 +
                             garch_coef[3, ] * sim_std[, i]^2)
    }
  }
  return(sim_ret_cul)
}
#cbind(sim_ret_cul, sim_ret_new)
## Simulate 10000 cumulative 21-day log-return paths for the 30 stocks.
FHS_ret_21_sim <- multivariate_FHS_1month(FH=10000, shock_database=daily_shock_stock, sigma_initial=sigma_initial, garch_coef=garch_stock_coef)
port_value_sim21_vega <- rep(0,10000)
port_value_sim21_theta <- rep(0,10000)
for(i in 1:10000){
price_call_sim21 <- rep(0,30)
price_put_sim21 <- rep(0,30)
## Stock prices at expiration under scenario i: S_T = S_0 * exp(cum return).
stock_sim_expiration <- exp(FHS_ret_21_sim[,i])*stock_price_time0
for (j in 1:30) {
if (j==1) {
## Factor 1 is the DJX index; options quote on the index level / 100.
stock_sim21_DJX <- stock_sim_expiration[1]/100
## At expiry the option values reduce to their intrinsic payoffs.
price_call_sim21[j] <- max(stock_sim21_DJX - X_time0_call[j],0)
price_put_sim21[j] <- max(X_time0_put[j] - stock_sim21_DJX,0)
}else{
price_call_sim21[j] <- max(stock_sim_expiration[j] - X_time0_call[j],0)
price_put_sim21[j] <- max(X_time0_put[j] - stock_sim_expiration[j],0)
}
}
## Portfolio value = minus the factor-1 straddle plus k times the sum of all
## other calls and puts (same weights k as in the 1-day analysis).
port_value_sim21_vega[i] <- -(price_call_sim21[1]+price_put_sim21[1])+k_vega_neutral*sum(price_call_sim21[2:30],price_put_sim21[2:30])
port_value_sim21_theta[i] <- -(price_call_sim21[1]+price_put_sim21[1])+k_theta_neutral*sum(price_call_sim21[2:30],price_put_sim21[2:30])
}
## Simulated 21-day P/L distributions relative to today's portfolio values.
PL_vega_sim21 <- port_value_sim21_vega-port_time0_vega_neutral
PL_theta_sim21 <- port_value_sim21_theta-port_time0_theta_neutral
## Value-at-Risk: absolute value of the 1% / 5% P/L quantiles.
VaR_vega_1perc_21 <- abs(quantile(PL_vega_sim21, 0.01))
VaR_vega_1perc_21
VaR_vega_5perc_21 <- abs(quantile(PL_vega_sim21, 0.05))
VaR_vega_5perc_21
VaR_theta_1perc_21 <- abs(quantile(PL_theta_sim21, 0.01))
VaR_theta_1perc_21
VaR_theta_5perc_21 <- abs(quantile(PL_theta_sim21, 0.05))
VaR_theta_5perc_21
hist(PL_vega_sim21, main = "P/L Distribution of the vega-neutral portfolio(1-month horizon)", xlab = "P/L",breaks=1000,col = "green",
border = "blue", xlim=c(-25,25))
hist(PL_theta_sim21, main = "P/L Distribution of the theta-neutral portfolio(1-month horizon)", xlab = "P/L",breaks=1000,col = "green",
border = "blue", xlim=c(-20,20))
## Expected shortfall: average loss conditional on exceeding the VaR level.
ES_1perc_vega_21 = -mean(PL_vega_sim21[PL_vega_sim21<=(-VaR_vega_1perc_21 )])
ES_1perc_vega_21
ES_5perc_vega_21 = -mean(PL_vega_sim21[PL_vega_sim21<=(-VaR_vega_5perc_21)])
ES_5perc_vega_21
ES_1perc_theta_21 = -mean(PL_theta_sim21[PL_theta_sim21<=(-VaR_theta_1perc_21)])
ES_1perc_theta_21
ES_5perc_theta_21 = -mean(PL_theta_sim21[PL_theta_sim21<=(-VaR_theta_5perc_21)])
ES_5perc_theta_21
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_features.R
\name{plot_features}
\alias{plot_features}
\alias{plot_feature_bars}
\alias{plot_feature_profiles}
\alias{plot_feature_distributions}
\alias{plot_feature_boxes}
\title{Plot features}
\usage{
plot_features(
object,
geom = default_feature_plots(object)[1],
x = default_x(object, geom[1]),
color_var = default_color_var(object),
color_values = default_color_values(object, color_var),
shape_var = default_shape_var(object),
group_var = default_group_var(object),
txt_var = default_txt_var(object),
facet_var = NULL,
alpha_var = NULL,
fvars = default_fvars(object),
title = "",
scales = "free_y",
x_text_angle = 90,
line = default_line(object),
zero_hline = default_zero_hline(object),
xlab = NULL,
ylab = NULL,
dodge_width = 0,
verbose = FALSE,
file = NULL,
width = NULL,
height = NULL,
theme = ggplot2::theme_bw(),
legend.position = "right"
)
plot_feature_bars(...)
plot_feature_profiles(...)
plot_feature_distributions(...)
plot_feature_boxes(...)
}
\arguments{
\item{object}{SummarizedExperiment, eSet, or EList}
\item{geom}{'point', 'bar', 'violin', 'boxplot'}
\item{x}{svar mapped to x}
\item{color_var}{svar mapped to color}
\item{color_values}{named color vector (names = color_var levels, values = colors)}
\item{shape_var}{svar mapped to shape (only relevant for point)}
\item{group_var}{svar mapped to group}
\item{txt_var}{svar mapped to txt}
\item{facet_var}{svar on which to facet plot}
\item{alpha_var}{svar (logical) mapped to absence/presence of transparency}
\item{fvars}{fvar used for plot annotation}
\item{title}{plot title}
\item{scales}{'free'}
\item{x_text_angle}{angle of text on x axis}
\item{line}{logical: should line be added?}
\item{zero_hline}{logical: should y=0 line be added?}
\item{xlab}{xlab}
\item{ylab}{ylab}
\item{dodge_width}{numeric}
\item{verbose}{logical}
\item{file}{string: file path}
\item{width}{numeric}
\item{height}{numeric}
\item{theme}{ggplot2::theme}
\item{legend.position}{position of legend}
\item{...}{only for backward compatibility to deprecated functions}
}
\value{
file path
}
\description{
Plot features
}
\examples{
if (require(autonomics.data)){
require(magrittr)
result_dir <- tempdir() \%T>\% message()
# STEM CELL COMPARISON
object <- autonomics.data::stemcomp.proteinratios \%>\% extract(1:4, )
object \%>\% plot_features(geom = 'violin')
object \%>\% plot_features(geom = 'point')
object \%>\% plot_features(geom = 'boxplot')
object \%>\% plot_features(
geom = 'boxplot',
file = paste0(result_dir, '/stemcomp_boxes.pdf'))
# GLUTAMINASE
object <- autonomics.data::glutaminase[1:4, ]
object \%>\% plot_features(geom = 'boxplot', x = 'TIME_POINT')
}
}
| /autonomics.plot/man/plot_features.Rd | no_license | metabdel/autonomics | R | false | true | 2,856 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_features.R
\name{plot_features}
\alias{plot_features}
\alias{plot_feature_bars}
\alias{plot_feature_profiles}
\alias{plot_feature_distributions}
\alias{plot_feature_boxes}
\title{Plot features}
\usage{
plot_features(
object,
geom = default_feature_plots(object)[1],
x = default_x(object, geom[1]),
color_var = default_color_var(object),
color_values = default_color_values(object, color_var),
shape_var = default_shape_var(object),
group_var = default_group_var(object),
txt_var = default_txt_var(object),
facet_var = NULL,
alpha_var = NULL,
fvars = default_fvars(object),
title = "",
scales = "free_y",
x_text_angle = 90,
line = default_line(object),
zero_hline = default_zero_hline(object),
xlab = NULL,
ylab = NULL,
dodge_width = 0,
verbose = FALSE,
file = NULL,
width = NULL,
height = NULL,
theme = ggplot2::theme_bw(),
legend.position = "right"
)
plot_feature_bars(...)
plot_feature_profiles(...)
plot_feature_distributions(...)
plot_feature_boxes(...)
}
\arguments{
\item{object}{SummarizedExperiment, eSet, or EList}
\item{geom}{'point', 'bar', 'violin', 'boxplot'}
\item{x}{svar mapped to x}
\item{color_var}{svar mapped to color}
\item{color_values}{named color vector (names = color_var levels, values = colors)}
\item{shape_var}{svar mapped to shape (only relevant for point)}
\item{group_var}{svar mapped to group}
\item{txt_var}{svar mapped to txt}
\item{facet_var}{svar on which to facet plot}
\item{alpha_var}{svar (logical) mapped to absence/presence of transparency}
\item{fvars}{fvar used for plot annotation}
\item{title}{plot title}
\item{scales}{'free'}
\item{x_text_angle}{angle of text on x axis}
\item{line}{logical: should line be added?}
\item{zero_hline}{logical: should y=0 line be added?}
\item{xlab}{xlab}
\item{ylab}{ylab}
\item{dodge_width}{numeric}
\item{verbose}{logical}
\item{file}{string: file path}
\item{width}{numeric}
\item{height}{numeric}
\item{theme}{ggplot2::theme}
\item{legend.position}{position of legend}
\item{...}{only for backward compatibility to deprecated functions}
}
\value{
file path
}
\description{
Plot features
}
\examples{
if (require(autonomics.data)){
require(magrittr)
result_dir <- tempdir() \%T>\% message()
# STEM CELL COMPARISON
object <- autonomics.data::stemcomp.proteinratios \%>\% extract(1:4, )
object \%>\% plot_features(geom = 'violin')
object \%>\% plot_features(geom = 'point')
object \%>\% plot_features(geom = 'boxplot')
object \%>\% plot_features(
geom = 'boxplot',
file = paste0(result_dir, '/stemcomp_boxes.pdf'))
# GLUTAMINASE
object <- autonomics.data::glutaminase[1:4, ]
object \%>\% plot_features(geom = 'boxplot', x = 'TIME_POINT')
}
}
|
# Task 1f: Code that simulates Markov chain Yn for n = 300 steps.
# Discrete-time stochastic SIR model: Y = (S, I, R) counts in a closed
# population of N individuals; daily compartment moves are binomial draws.
N <- 1000 # Individuals.
Y0 <- c(950, 50, 0) # Starting state: (susceptible, infected, recovered).
# Transition probabilities (to begin with).
gamma <- 0.10 # daily probability an infected individual recovers (I -> R)
alpha <- 0.01 # daily probability a recovered individual becomes susceptible again (R -> S)
# Per-susceptible infection probability; proportional to the infected fraction.
beta <- function(Y){
return (0.5*Y[2]/N)
}
n <- 300 # Time steps.
values <- matrix(data=NA,nrow=3,ncol=n) # Preallocate matrix for simulated values.
values[, 1] <- Y0
# Run simulation.
for (t in 2:n){
# Use a binomial draw for the number of individuals moving between compartments.
old_susc <- values[1, t-1]
old_inf <- values[2, t-1]
old_rec <- values[3, t-1]
new_inf <- rbinom(n = 1, size = old_susc, beta(values[, t-1])) # S -> I
new_rec <- rbinom(n = 1, size = old_inf, gamma)                # I -> R
new_susc <- rbinom(n = 1,size = old_rec, alpha)                # R -> S
# Net update of the three compartments; the total stays N by construction.
Y <- c(old_susc - new_inf + new_susc, old_inf - new_rec + new_inf, old_rec - new_susc + new_rec)
values[, t] <- Y
}
# Plot one realization: all three compartments over time on a shared axis.
par_lty <- c(3,2,1)
par_col <- c("blue", "red", "green")
plot(1:n, values[1, ], type = "l", lty = par_lty[1], col = par_col[1], xlab="Time [days]", ylab = "Individuals", main = "One Realization")
lines(1:n, values[2, ], type = "l", lty = par_lty[2], col = par_col[2])
lines(1:n, values[3, ], type = "l", lty = par_lty[3], col = par_col[3])
legend("topright", legend= c("Susceptible", "Infected", "Recovered"), lty = par_lty, col = par_col)
| /Project1/onef.R | no_license | alexaoh/stochmod | R | false | false | 1,249 | r | # Task 1f: Code that simulates Markov chain Yn for n = 300 steps.
N <- 1000 # Individuals.
Y0 <- c(950, 50, 0) # Starting state: (susceptible, infected, recovered).
# Transition probabilities (to begin with).
gamma <- 0.10 # daily probability an infected individual recovers (I -> R)
alpha <- 0.01 # daily probability a recovered individual becomes susceptible again (R -> S)
# Per-susceptible infection probability; proportional to the infected fraction.
beta <- function(Y){
return (0.5*Y[2]/N)
}
n <- 300 # Time steps.
values <- matrix(data=NA,nrow=3,ncol=n) # Preallocate matrix for simulated values.
values[, 1] <- Y0
# Run simulation.
for (t in 2:n){
# Use a binomial draw for the number of individuals moving between compartments.
old_susc <- values[1, t-1]
old_inf <- values[2, t-1]
old_rec <- values[3, t-1]
new_inf <- rbinom(n = 1, size = old_susc, beta(values[, t-1])) # S -> I
new_rec <- rbinom(n = 1, size = old_inf, gamma)                # I -> R
new_susc <- rbinom(n = 1,size = old_rec, alpha)                # R -> S
# Net update of the three compartments; the total stays N by construction.
Y <- c(old_susc - new_inf + new_susc, old_inf - new_rec + new_inf, old_rec - new_susc + new_rec)
values[, t] <- Y
}
# Plot one realization: all three compartments over time on a shared axis.
par_lty <- c(3,2,1)
par_col <- c("blue", "red", "green")
plot(1:n, values[1, ], type = "l", lty = par_lty[1], col = par_col[1], xlab="Time [days]", ylab = "Individuals", main = "One Realization")
lines(1:n, values[2, ], type = "l", lty = par_lty[2], col = par_col[2])
lines(1:n, values[3, ], type = "l", lty = par_lty[3], col = par_col[3])
legend("topright", legend= c("Susceptible", "Infected", "Recovered"), lty = par_lty, col = par_col)
# server.R
# Shiny server for a ToothGrowth explorer: fits len ~ dose + supp once at
# startup and re-renders a ggplot (plus lm predictions for both supplement
# types) whenever the user inputs change.
library(shiny)
library(datasets) # datasets Author: R Core Team and contributors worldwide
library(ggplot2)
# Global defaults for the smoothing options; some are overridden per-request
# inside myGpig() from the UI inputs.
ui_smooth_line <- TRUE
uismoothShadedCR <- TRUE
tgAll <- ToothGrowth
set.seed(52315)
# Linear model used for the dose/supplement predictions shown on the plot.
fitAll <- lm(len~dose+supp, data=tgAll)
uismoothing <- 'lm' # NOTE(review): assigned but never read below
uiloess <- FALSE    # if TRUE, loess smoothing is used instead of lm
uicrlevel <- .90    # default confidence level for the smoothing band
shinyServer(function(input, output) {
# Generate a plot of the data. Also uses the inputs to build
# the plot label. Note that the dependencies on both the inputs
# and the data reactive expression are both tracked, and
# all expressions are called in the sequence implied by the
# dependency graph
# Capture this environment so myGpig() can pass predictions back out via
# assign()/get() (see below).
myShiny.env <- environment()
output$plot <- renderPlot({
p <- myGpig(input$dose, input$confidence, input$shadow, input$smoothline, input$jitter)
# Retrieve the predictions that myGpig() stored in the shared environment.
predVC <- get('predVC', envir=myShiny.env)
predOJ <- get('predOJ', envir=myShiny.env)
# NOTE(review): defining outputs inside renderPlot is a known shiny
# anti-pattern; a reactive() expression would be cleaner. Left unchanged.
output$predVC <- renderPrint({ round(predVC, 2) })
output$predOJ <- renderPrint({ round(predOJ, 2) })
output$odose <- renderPrint({ input$dose})
p
})
#-------------------------------------------------------------------------
# Build the ggplot for the given user inputs; side effect: stores the lm
# predictions for both supplements (predVC, predOJ) in myShiny.env.
#-------------------------------------------------------------------------
myGpig <- function(ui_dose, ui_confidence, ui_shadow, ui_smooth_line, ui_jitter) {
#------- start of render plot ------------
uicrlevel <- as.numeric(ui_confidence)
uismoothShadedCR <- ui_shadow
#-------------------------- plot -----
set.seed(52315)
predmeVC <- data.frame(dose=ui_dose, supp='VC')
predmeOJ <- data.frame(dose=ui_dose, supp='OJ')
#
# predict tooth length given supp and dose
#
predVC <- as.numeric(predict(fitAll, predmeVC))
predOJ <- as.numeric(predict(fitAll, predmeOJ))
# pass two variables back to the environment outside of this subroutine
assign('predVC', predVC, envir=myShiny.env)
assign('predOJ', predOJ, envir=myShiny.env)
# Base scatter plot with annotation text and crosshair lines at the chosen
# dose and the two predicted lengths.
p <- ggplot(tgAll, aes(dose, len, color=supp))
p <- p + annotate("text", x=1.65, y=10, label =paste('For dose:', round(ui_dose, 2)))
p <- p + annotate("text", x=1.65, y=8.75, label =paste('lm predicted OJ:',round(predOJ, 2)))
p <- p + annotate("text", x=1.65, y=7.5, label =paste('lm predicted VC:', round(predVC, 2)))
p <- p + geom_vline(xintercept=ui_dose, col='darkgrey', lwd=1)
p <- p + geom_hline(yintercept=predVC, col='darkgrey', lwd=1)
p <- p + geom_hline(yintercept=predOJ, col='darkgrey', lwd=1)
p <- p + geom_point(shape=19)
if (ui_jitter == TRUE) {
p <- p + geom_point(position=position_jitter(width=0.2), alpha=0.4)
}
# smoothing choices: lm vs loess, shaded vs unshaded confidence region
if (ui_smooth_line == TRUE) {
if (uismoothShadedCR == TRUE) {
p <- p + annotate("text", x=1.65, y=5, label=paste("Smoothing confidence:", uicrlevel))
}
if (uiloess == TRUE) {
p <- p + annotate("text", x=1.65, y=6.25, label=paste('Loess smoothing'))
} else {
p <- p + annotate("text", x=1.65, y=6.25, label=paste('LM smoothing'))
}
if (uismoothShadedCR == TRUE) {
if (uiloess == TRUE) {
p <- p + geom_smooth(method=loess, level=uicrlevel) # loess (locally weighted polynomial curve)
} else {
p <- p + geom_smooth(method=lm, level=uicrlevel) # linear regression line with shaded confidence region
}
} else {
if (uiloess == TRUE) {
p <- p + geom_smooth(method=loess, se=FALSE, level=uicrlevel) # loess (locally weighted polynomial curve)
} else {
p <- p + geom_smooth(method=lm, se=FALSE, level=uicrlevel) # linear regression line, no confidence region
}
}
} # end of ui_smooth_line == TRUE
p <- p + ggtitle('Tooth Growth with Different Treatments')
#-------------------------- plot -----
}
#-------------------------------------------------------------------------
}) | /server.R | no_license | Roman-Rudensky/DataProducts | R | false | false | 4,185 | r | # server.R
# Shiny server for a ToothGrowth explorer: fits len ~ dose + supp once at
# startup and re-renders a ggplot (plus lm predictions for both supplement
# types) whenever the user inputs change.
library(shiny)
library(datasets) # datasets Author: R Core Team and contributors worldwide
library(ggplot2)
# Global defaults for the smoothing options; some are overridden per-request
# inside myGpig() from the UI inputs.
ui_smooth_line <- TRUE
uismoothShadedCR <- TRUE
tgAll <- ToothGrowth
set.seed(52315)
# Linear model used for the dose/supplement predictions shown on the plot.
fitAll <- lm(len~dose+supp, data=tgAll)
uismoothing <- 'lm' # NOTE(review): assigned but never read below
uiloess <- FALSE    # if TRUE, loess smoothing is used instead of lm
uicrlevel <- .90    # default confidence level for the smoothing band
shinyServer(function(input, output) {
# Generate a plot of the data. Also uses the inputs to build
# the plot label. Note that the dependencies on both the inputs
# and the data reactive expression are both tracked, and
# all expressions are called in the sequence implied by the
# dependency graph
# Capture this environment so myGpig() can pass predictions back out via
# assign()/get() (see below).
myShiny.env <- environment()
output$plot <- renderPlot({
p <- myGpig(input$dose, input$confidence, input$shadow, input$smoothline, input$jitter)
# Retrieve the predictions that myGpig() stored in the shared environment.
predVC <- get('predVC', envir=myShiny.env)
predOJ <- get('predOJ', envir=myShiny.env)
# NOTE(review): defining outputs inside renderPlot is a known shiny
# anti-pattern; a reactive() expression would be cleaner. Left unchanged.
output$predVC <- renderPrint({ round(predVC, 2) })
output$predOJ <- renderPrint({ round(predOJ, 2) })
output$odose <- renderPrint({ input$dose})
p
})
#-------------------------------------------------------------------------
# Build the ggplot for the given user inputs; side effect: stores the lm
# predictions for both supplements (predVC, predOJ) in myShiny.env.
#-------------------------------------------------------------------------
myGpig <- function(ui_dose, ui_confidence, ui_shadow, ui_smooth_line, ui_jitter) {
#------- start of render plot ------------
uicrlevel <- as.numeric(ui_confidence)
uismoothShadedCR <- ui_shadow
#-------------------------- plot -----
set.seed(52315)
predmeVC <- data.frame(dose=ui_dose, supp='VC')
predmeOJ <- data.frame(dose=ui_dose, supp='OJ')
#
# predict tooth length given supp and dose
#
predVC <- as.numeric(predict(fitAll, predmeVC))
predOJ <- as.numeric(predict(fitAll, predmeOJ))
# pass two variables back to the environment outside of this subroutine
assign('predVC', predVC, envir=myShiny.env)
assign('predOJ', predOJ, envir=myShiny.env)
# Base scatter plot with annotation text and crosshair lines at the chosen
# dose and the two predicted lengths.
p <- ggplot(tgAll, aes(dose, len, color=supp))
p <- p + annotate("text", x=1.65, y=10, label =paste('For dose:', round(ui_dose, 2)))
p <- p + annotate("text", x=1.65, y=8.75, label =paste('lm predicted OJ:',round(predOJ, 2)))
p <- p + annotate("text", x=1.65, y=7.5, label =paste('lm predicted VC:', round(predVC, 2)))
p <- p + geom_vline(xintercept=ui_dose, col='darkgrey', lwd=1)
p <- p + geom_hline(yintercept=predVC, col='darkgrey', lwd=1)
p <- p + geom_hline(yintercept=predOJ, col='darkgrey', lwd=1)
p <- p + geom_point(shape=19)
if (ui_jitter == TRUE) {
p <- p + geom_point(position=position_jitter(width=0.2), alpha=0.4)
}
# smoothing choices: lm vs loess, shaded vs unshaded confidence region
if (ui_smooth_line == TRUE) {
if (uismoothShadedCR == TRUE) {
p <- p + annotate("text", x=1.65, y=5, label=paste("Smoothing confidence:", uicrlevel))
}
if (uiloess == TRUE) {
p <- p + annotate("text", x=1.65, y=6.25, label=paste('Loess smoothing'))
} else {
p <- p + annotate("text", x=1.65, y=6.25, label=paste('LM smoothing'))
}
if (uismoothShadedCR == TRUE) {
if (uiloess == TRUE) {
p <- p + geom_smooth(method=loess, level=uicrlevel) # loess (locally weighted polynomial curve)
} else {
p <- p + geom_smooth(method=lm, level=uicrlevel) # linear regression line with shaded confidence region
}
} else {
if (uiloess == TRUE) {
p <- p + geom_smooth(method=loess, se=FALSE, level=uicrlevel) # loess (locally weighted polynomial curve)
} else {
p <- p + geom_smooth(method=lm, se=FALSE, level=uicrlevel) # linear regression line, no confidence region
}
}
} # end of ui_smooth_line == TRUE
p <- p + ggtitle('Tooth Growth with Different Treatments')
#-------------------------- plot -----
}
#-------------------------------------------------------------------------
})
\name{subangular}
\alias{subangular}
\docType{data}
\title{
Dataset of coordinates for a subangular-shaped skeleton unit
%% ~~ data name/kind ... ~~
}
\description{
Dataset of coordinates for a subangular-shaped skeleton unit
%% ~~ A concise (1-5 lines) description of the dataset. ~~
}
\usage{data(subangular)}
\format{
A matrix of coordinates
}
\examples{
plot(subangular, type='n', axes=FALSE)
polygon(subangular)
}
\keyword{datasets}
| /man/subangular.Rd | no_license | cran/soilprofile | R | false | false | 443 | rd | \name{subangular}
\alias{subangular}
\docType{data}
\title{
Dataset of coordinates for a subangular-shaped skeleton unit
%% ~~ data name/kind ... ~~
}
\description{
Dataset of coordinates for a subangular-shaped skeleton unit
%% ~~ A concise (1-5 lines) description of the dataset. ~~
}
\usage{data(subangular)}
\format{
A matrix of coordinates
}
\examples{
plot(subangular, type='n', axes=FALSE)
polygon(subangular)
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/feature_migration.R
\name{feat_migration}
\alias{feat_migration}
\title{Feature: Migration/Gene Flow}
\usage{
feat_migration(rate, pop_from = NULL, pop_to = NULL, symmetric = FALSE,
time = "0", locus_group = "all")
}
\arguments{
\item{rate}{The migration rate. Can be a numeric or a
\code{\link{parameter}}. The rate is specified as
\eqn{4 * N0 * m}, where \eqn{m} is the fraction of
\code{pop_to} that is replaced by migrants
from \code{pop_from} each generation (in forward time).}
\item{pop_from}{The population from which the individuals leave.}
\item{pop_to}{The population to which the individuals move.}
\item{symmetric}{Use the rate for all pairs of populations.}
\item{time}{The time point at which the migration with the migration
rate is set. The rate applies to the time pastwards
of the time point, until it is changed again.}
\item{locus_group}{The loci for which this features is used. Can either be
\code{"all"} (default), in which case the feature is used for simulating
all loci, or a numeric vector. In the latter case, the feature is only
used for the loci added in \code{locus_} commands with the corresponding
index starting from 1 in order in which the commands where added to the
model. For example, if a model has
\code{locus_single(10) + locus_averaged(10, 11) + locus_single(12)} and
this argument is \code{c(2, 3)}, than the feature is used for all but
the first locus (that is locus 2 - 12).}
}
\value{
The feature, which can be added to a model created with
\code{\link{coal_model}} using \code{+}.
}
\description{
This feature changes the migration rates at a given time point.
Per default, no migration between the population occurs, which corresponds
to a \code{rate} of \code{0}. Set it to a value greater than zero to
enable migration from one population to another.
}
\details{
When looking forward in time, a fraction of \code{pop_to} that is replaced
by migrants from \code{pop_from} each generation (see \code{rate}). When
looking backwards in time, ancestral lines in \code{pop_to} move to
\code{pop_from} with the given rate.
}
\examples{
# Asymmetric migration between two populations:
model <- coal_model(c(5, 5), 10) +
feat_migration(0.5, 1, 2) +
feat_migration(1.0, 2, 1) +
feat_mutation(5) +
sumstat_sfs()
simulate(model)
# Three populations that exchange migrations with equal
# rates at times more than 0.5 time units in the past:
model <- coal_model(c(3, 4, 5), 2) +
feat_migration(1.2, symmetric = TRUE, time = 0.5) +
feat_mutation(5) +
sumstat_sfs()
simulate(model)
}
\seealso{
For creating a model: \code{\link{coal_model}}
Other features: \code{\link{feat_growth}},
\code{\link{feat_ignore_singletons}},
\code{\link{feat_mutation}}, \code{\link{feat_outgroup}},
\code{\link{feat_pop_merge}},
\code{\link{feat_recombination}},
\code{\link{feat_selection}},
\code{\link{feat_size_change}},
\code{\link{feat_unphased}}
}
| /man/feat_migration.Rd | no_license | gtonkinhill/coala | R | false | true | 2,990 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/feature_migration.R
\name{feat_migration}
\alias{feat_migration}
\title{Feature: Migration/Gene Flow}
\usage{
feat_migration(rate, pop_from = NULL, pop_to = NULL, symmetric = FALSE,
time = "0", locus_group = "all")
}
\arguments{
\item{rate}{The migration rate. Can be a numeric or a
\code{\link{parameter}}. The rate is specified as
\eqn{4 * N0 * m}, where \eqn{m} is the fraction of
\code{pop_to} that is replaced by migrants
from \code{pop_from} each generation (in forward time).}
\item{pop_from}{The population from which the individuals leave.}
\item{pop_to}{The population to which the individuals move.}
\item{symmetric}{Use the rate for all pairs of populations.}
\item{time}{The time point at which the migration with the migration
rate is set. The rate applies to the time further into the past
of the time point, until it is changed again.}
\item{locus_group}{The loci for which this feature is used. Can either be
\code{"all"} (default), in which case the feature is used for simulating
all loci, or a numeric vector. In the latter case, the feature is only
used for the loci added in \code{locus_} commands with the corresponding
index starting from 1 in the order in which the commands were added to the
model. For example, if a model has
\code{locus_single(10) + locus_averaged(10, 11) + locus_single(12)} and
this argument is \code{c(2, 3)}, then the feature is used for all but
the first locus (that is locus 2 - 12).}
}
\value{
The feature, which can be added to a model created with
\code{\link{coal_model}} using \code{+}.
}
\description{
This feature changes the migration rates at a given time point.
By default, no migration between the populations occurs, which corresponds
to a \code{rate} of \code{0}. Set it to a value greater than zero to
enable migration from one population to another.
}
\details{
When looking forward in time, a fraction of \code{pop_to} is replaced
by migrants from \code{pop_from} each generation (see \code{rate}). When
looking backwards in time, ancestral lines in \code{pop_to} move to
\code{pop_from} with the given rate.
}
\examples{
# Asymmetric migration between two populations:
model <- coal_model(c(5, 5), 10) +
feat_migration(0.5, 1, 2) +
feat_migration(1.0, 2, 1) +
feat_mutation(5) +
sumstat_sfs()
simulate(model)
# Three populations that exchange migrations with equal
# rates at times more than 0.5 time units in the past:
model <- coal_model(c(3, 4, 5), 2) +
feat_migration(1.2, symmetric = TRUE, time = 0.5) +
feat_mutation(5) +
sumstat_sfs()
simulate(model)
}
\seealso{
For creating a model: \code{\link{coal_model}}
Other features: \code{\link{feat_growth}},
\code{\link{feat_ignore_singletons}},
\code{\link{feat_mutation}}, \code{\link{feat_outgroup}},
\code{\link{feat_pop_merge}},
\code{\link{feat_recombination}},
\code{\link{feat_selection}},
\code{\link{feat_size_change}},
\code{\link{feat_unphased}}
}
|
#' @title N days
#' @description Function to count the sample size (number of days) used in the
#' calculation of the indices.
#' @template templateMeasureParams
#' @param dates dates
#' @return An integer corresponding to the counts
#' @author J. Bedia, D. San-Martin
#' @export
measure.nDays <- function(indexObs = NULL, indexPrd = NULL, obs, prd, dates) {
  # The sample size is simply the number of observed values; the remaining
  # arguments belong to the shared measure.* signature and are not used here.
  n_days <- length(obs)
  n_days
}
| /R/measure.nDays.R | no_license | SantanderMetGroup/R_VALUE | R | false | false | 376 | r | #' @title N days
#' @description Function to count the sample size (number of days) used in the calculation of the indices
#' @template templateMeasureParams
#' @param dates dates
#' @return An integer corresponding to the counts
#' @author J. Bedia, D. San-Martin
#' @export
measure.nDays <- function(indexObs = NULL, indexPrd = NULL, obs, prd, dates) {
  # The sample size is simply the number of observed values; the remaining
  # arguments belong to the shared measure.* signature and are not used here.
  n_days <- length(obs)
  n_days
}
|
#' @include class_AmChart.R class_AmStockChart.R
NULL
# Virtual class union so that methods below (setExport, setResponsive, plot,
# ...) can be defined once for both AmChart and AmStockChart objects.
setClassUnion(name = "AmCharts", members = c("AmChart", "AmStockChart"))
#' @title Setters for AmChart and AmStockChart.
#' @description Generic setters that work both on \linkS4class{AmChart} and
#' \linkS4class{AmStockChart} objects and set some first-level properties.
#' @param .Object \linkS4class{AmChart} or \linkS4class{AmStockChart}.
#' @param enabled \code{logical}, TRUE to display the export button.
#' @param ... Other properties that can be used depending on the setter.
#' @rdname amcharts-setters
#' @export
#'
setGeneric(
  name = "setExport",
  def = function(.Object, enabled = TRUE, ...) {
    standardGeneric("setExport")
  }
)
#' @examples
#' \donttest{
#' # Dummy examples
#' setExport(amPlot(1:10))
#' setExport(amStockChart())
#' }
#' @rdname amcharts-setters
#'
setMethod(f = "setExport", signature = c("AmCharts", "logicalOrMissing"),
          definition = function(.Object, enabled = TRUE, ...)
          {
            # Store the export settings as the chart's top-level 'export'
            # property (a list such as list(enabled = TRUE, ...)).
            .Object <- setProperties( .Object, export = list(enabled = enabled, ...) )
            # Fail fast if the updated object no longer satisfies its class
            # validity constraints.
            validObject(.Object)
            return(.Object)
          })
#' @rdname amcharts-setters
#' @export
#'
setGeneric(
  name = "setResponsive",
  def = function(.Object, enabled = TRUE, ...) {
    standardGeneric("setResponsive")
  }
)
#' @examples
#' \donttest{
#' # Dummy examples
#' setResponsive(amSerialChart())
#' setResponsive(amStockChart())
#' }
#' @rdname amcharts-setters
setMethod(f = "setResponsive", signature = c("AmCharts", "logicalOrMissing"),
          definition = function(.Object, enabled = TRUE, ...)
          {
            # Store the responsive settings as the chart's top-level
            # 'responsive' property (a list such as list(enabled = TRUE, ...)).
            .Object <- setProperties(.Object = .Object, responsive = list(enabled = enabled, ...))
            # Fail fast if the updated object no longer satisfies its class
            # validity constraints.
            validObject(.Object)
            return(.Object)
          })
#' Test whether a chart can be plotted (or printed)
#'
#' If the chart has a non-empty `type` slot it is rendered as an htmlwidget
#' (via `plot`); otherwise the object itself is printed to the console.
#' Inside a knitr document the widget is handed to `knitr::knit_print` so it
#' is embedded in the rendered output instead of being printed.
#' @noRd
#'
.plot_or_print <- function(object)
{
  # Explicit length comparison instead of relying on numeric truthiness.
  if (length(object@type) > 0) {
    chart_widget <- plot(object)
    if (isTRUE(getOption("knitr.in.progress"))) {
      knitr::knit_print(chart_widget)
    } else {
      print(chart_widget)
    }
  } else {
    print(object)
  }
}
#' @title Visualize AmChart with show
#' @description Display the object in the console.
#' @param object \linkS4class{AmChart}.
#' @return If the object has a valid type, it will plot the chart.
#' If not the method will trigger the method 'print'.
#'
setMethod(f = "show", signature = "AmChart", definition = .plot_or_print)
#' @title Visualize AmStockChart with show
#' @description Display the object in the console. Delegates to the internal
#' \code{.plot_or_print} helper, like the AmChart method.
#' @param object \linkS4class{AmStockChart}.
#' @return If the object has a valid type, it will plot the chart.
#' If not the method will trigger the method 'print'.
#'
setMethod(f = "show", signature = "AmStockChart", definition = .plot_or_print)
#' @title PLOTTING METHOD
#' @description Basic method to plot an AmChart or AmStockChart.
#' @details Plots an object of class \code{\linkS4class{AmChart}}
#' @param x \linkS4class{AmChart}
#' @param y unused.
#' @param width \code{character}, widget width (default "100%").
#' @param height \code{character}, widget height.
#' @param background \code{character}, background color; overridden by a
#' chart theme or an explicit 'backgroundColor' property.
#' @param ... Other properties.
#' @rdname plot.AmChart
#' @import htmlwidgets
#' @import htmltools
#' @export
setMethod(f = "plot", signature = "AmCharts",
          definition = function(x, y, width = "100%", height = NULL,
                                background = "#ffffff", ...)
          {
            # Flatten the S4 chart object into a plain list of properties
            # that can be serialized for the JavaScript side.
            chart_ls <- listProperties(x)
            # remove temporary parameter
            chart_ls["RType_"] <- NULL
            # A named theme forces the matching background color.
            theme <- chart_ls$theme
            if (length(theme)) {
              background <- switch(theme,
                                   "light" = "#ffffff",
                                   "patterns" = "#ffffff",
                                   "default" = "#ffffff",
                                   "dark" = "#3f3f4f",
                                   "chalk" = "#282828",
                                   stop("[plot]: invalid theme"))
            } else {}
            # set background: an explicit 'backgroundColor' property wins and
            # is removed from the property list (it is passed separately).
            if (exists("backgroundColor", where = chart_ls)) {
              background <- chart_ls$backgroundColor
              chart_ls["backgroundColor"] <- NULL
            } else {}
            # listeners on chart: extracted from the serialized properties so
            # they can be bound as JS callbacks on the widget instead.
            if (exists("listeners", where = chart_ls)) {
              listeners <- chart_ls$listeners
              chart_ls[grep(x = names(chart_ls), pattern = "^listeners")] <- NULL
            } else {
              listeners <- NULL
            }
            # Each substitute*Listener(s) call below strips the 'listeners'
            # entries from one sub-object and returns them separately.
            # listeners on axes (GaugeAxis class)
            ls_temp <- substituteMultiListeners(chart_ls, "axes")
            chart_ls <- ls_temp$chart
            axes_listeners <- ls_temp$listeners_ls
            axes_listenersIndices <- ls_temp$indices
            # listeners on categoryAxis
            ls_temp <- substituteListener(chart_ls, "categoryAxis")
            chart_ls <- ls_temp$chart
            categoryAxis_listeners <- ls_temp$listeners
            # listeners on chartCursor
            ls_temp <- substituteListener(chart_ls, "chartCursor")
            chart_ls <- ls_temp$chart
            chartCursor_listeners <- ls_temp$listeners
            # listeners on dataSetSelector
            ls_temp <- substituteListener(chart_ls, "dataSetSelector")
            chart_ls <- ls_temp$chart
            dataSetSelector_listeners <- ls_temp$listeners
            # listeners on legend
            ls_temp <- substituteListener(chart_ls, "legend")
            chart_ls <- ls_temp$chart
            legend_listeners <- ls_temp$listeners
            # listeners on panels
            ls_temp <- substituteMultiListeners(chart_ls, "panels")
            chart_ls <- ls_temp$chart
            panels_listeners <- ls_temp$listeners_ls
            panels_listenersIndices <- ls_temp$indices
            # listeners on periodSelector
            ls_temp <- substituteListener(chart_ls, "periodSelector")
            chart_ls <- ls_temp$chart
            periodSelector_listeners <- ls_temp$listeners
            # listeners on valueAxis
            ls_temp <- substituteMultiListeners(chart_ls, "valueAxes")
            chart_ls <- ls_temp$chart
            valueAxes_listeners <- ls_temp$listeners_ls
            valueAxes_listenersIndices <- ls_temp$indices
            # group (Stock synchronisation); an empty string means "no group"
            if (exists("group", where = chart_ls)) {
              group <- chart_ls$group
              chart_ls[grep(x = names(chart_ls), pattern = "^group")] <- NULL
              if(group == ""){
                group <- NULL
              }
            } else {
              group <- NULL
            }
            # case for drilldown chart: a different JS binding is used and the
            # sub-chart properties are passed alongside the main chart.
            if (exists("subChartProperties", where = chart_ls)) {
              jsFile <- "amDrillChart"
              chart_ls["subChartProperties"] <- NULL
              data <- list(main = chart_ls,
                           subProperties = x@subChartProperties,
                           background = background)
            } else {
              jsFile <- "ramcharts_base"
              data <- list(chartData = chart_ls,
                           background = background,
                           # listeners on chart
                           listeners = listeners,
                           # listeners on properties
                           axes_listeners = axes_listeners,
                           axes_listenersIndices = axes_listenersIndices,
                           categoryAxis_listeners = categoryAxis_listeners,
                           chartCursor_listeners = chartCursor_listeners,
                           dataSetSelector_listeners = dataSetSelector_listeners,
                           legend_listeners = legend_listeners,
                           panels_listeners = panels_listeners,
                           panels_listenersIndices = panels_listenersIndices,
                           periodSelector_listeners = periodSelector_listeners,
                           valueAxes_listeners = valueAxes_listeners,
                           valueAxes_listenersIndices = valueAxes_listenersIndices,
                           group = group)
            }
            # Create initial widget
            widget <- htmlwidgets::createWidget(name = eval(jsFile),
                                                x = data,
                                                width = width,
                                                height = height,
                                                package = 'rAmCharts')
            # Add dependencies if necessary (each helper is a no-op when the
            # corresponding property is absent from 'data').
            widget <- .add_type_dependency(widget = widget, data = data, type = x@type)
            widget <- .add_export_dependency(widget = widget, data = data)
            widget <- .add_theme_dependency(widget = widget, data = data)
            widget <- .add_dataloader_dependency(widget = widget, data = data)
            widget <- .add_responsive_dependency(widget = widget, data = data)
            widget <- .add_language_dependency(widget = widget, data = data)
            return(widget)
          })
#' Add dependency for chart type
#'
#' Attaches the AmCharts JS file matching the chart type (plus 'serial.js'
#' for the types built on top of serial charts) and, for stock charts, the
#' extra stylesheet. The 'data' argument is currently unused here but keeps
#' the signature consistent with the other .add_*_dependency helpers.
#' @import yaml
#' @noRd
#'
.add_type_dependency <- function(widget,
                                 data,
                                 type = c("funnel", "gantt", "gauge", "pie",
                                          "radar", "serial", "stock", "xy"))
{
  type <- match.arg(type)
  if (type == "stock") type <- "amstock" # temporary fix: the JS file is named 'amstock'
  file_js <- paste0(type, ".js")
  # For some type, we need to source also 'serial.js'
  if (type %in% c("gantt", "amstock")) file_js <- c("serial.js", file_js)
  # Load the configuration yaml file into list
  conf_list <- yaml::yaml.load_file(system.file("conf.yaml", package = "rAmCharts"))
  # Add main js dependency
  type_dep <- htmltools::htmlDependency(name = paste0("amcharts_type_", type),
                                        version = conf_list$amcharts_version,
                                        src = c(file = system.file("htmlwidgets/lib", package = "rAmCharts")),
                                        script = file_js)
  widget <- .add_dependency(widget = widget, dependency = type_dep)
  # Add stylesheet if necessary
  if (type == "amstock") {
    style_dep <- htmltools::htmlDependency(name = conf_list$styles$amstockcharts$name,
                                           version = conf_list$amcharts_version,
                                           src = c(file = system.file("htmlwidgets/lib", package = "rAmCharts")),
                                           stylesheet = conf_list$styles$amstockcharts$script)
    widget <- .add_dependency(widget = widget, dependency = style_dep)
  } else {
    # No stylesheet needed
  }
  return (widget)
}
#' Add the export plugin dependency when the chart enables export
#' @noRd
.add_export_dependency <- function(widget, data)
{
  export_enabled <- exists("chartData", where = data) &&
    exists("export", where = data$chartData) &&
    data$chartData$export$enabled
  if (export_enabled) {
    widget <- add_export_dependency(widget)
  }
  return(widget)
}
#' @title Add the export dependency to an htmlwidget
#'
#' @description Add the 'export' dependency to an htmlwidget.
#' You can only manipulate the htmlwidget if you call the method 'plot' with an rAmChart.
#'
#' @param widget An htmlwidget.
#'
#' @return Return the updated widget with the 'export' dependency.
#'
#' @export
#'
add_export_dependency <- function (widget)
{
  # Plugin name/files and the AmCharts version come from the package config.
  conf_list <- yaml::yaml.load_file(system.file("conf.yaml", package = "rAmCharts"))
  export_conf <- conf_list$plugins$export
  export_dep <- htmltools::htmlDependency(
    name = export_conf$name,
    version = conf_list$amcharts_version,
    src = system.file("htmlwidgets/lib/plugins/export", package = "rAmCharts"),
    stylesheet = export_conf$stylesheet,
    script = export_conf$script
  )
  .add_dependency(widget = widget, dependency = export_dep)
}
#' Add the theme JS dependency when a non-default theme is set
#' @noRd
.add_theme_dependency <- function(widget, data)
{
  has_theme <- exists("chartData", where = data) &&
    exists("theme", where = data$chartData) &&
    length(data$chartData$theme) &&
    (data$chartData$theme != "default")
  if (has_theme) {
    # Map the theme name to its JS file; unknown names are an error.
    theme_js <- switch(data$chartData$theme,
                       "light" = "light.js",
                       "patterns" = "patterns.js",
                       "dark" = "dark.js",
                       "chalk" = "chalk.js",
                       stop("[plot]: invalid theme"))
    widget <- add_theme_dependency(widget = widget, theme_js = theme_js)
  }
  return(widget)
}
#' @title Add theme dependency
#'
#' @description Add the 'theme' dependency to an htmlwidget.
#' You can only manipulate the htmlwidget if you call the method 'plot' with an rAmChart.
#'
#' @param widget An htmlwidget.
#' @param theme_js A character indicating the JS file dependency. One of
#' "light.js" (default), "patterns.js", "dark.js" or "chalk.js".
#'
#' @return Return the updated htmlwidget.
#'
#' @examples
#' \donttest{
#' library(pipeR)
#' amPlot(1:10, theme = "dark") %>>% plot() %>>% add_theme_dependency("light.js")
#' }
#'
#' @export
#'
add_theme_dependency <- function (widget, theme_js = c("light.js", "patterns.js", "dark.js", "chalk.js"))
{
  # Validate against the documented choices; this also makes the default work
  # (previously, omitting 'theme_js' passed the whole vector to paste0() and
  # htmltools::htmlDependency() would receive a non-scalar name).
  theme_js <- match.arg(theme_js)
  # Load the configuration yaml file into list
  conf_list <- yaml::yaml.load_file(system.file("conf.yaml", package = "rAmCharts"))
  theme_dep <- htmltools::htmlDependency(name = paste0("amcharts_themes_", theme_js),
                                         version = conf_list$amcharts_version,
                                         src = system.file("htmlwidgets/lib/themes", package = "rAmCharts"),
                                         script = theme_js)
  widget <- .add_dependency(widget = widget, dependency = theme_dep)
  return (widget)
}
#' Add the dataloader plugin dependency when the chart (or any of its data
#' sets) defines a 'dataLoader' property
#' @noRd
.add_dataloader_dependency <- function(widget, data)
{
  has_chart_data <- exists("chartData", where = data)
  loader_on_chart <- has_chart_data &&
    exists("dataLoader", where = data$chartData)
  loader_on_sets <- has_chart_data &&
    any(sapply(X = data$chartData$dataSets, FUN = exists, x = "dataLoader"))
  if (loader_on_chart || loader_on_sets) {
    widget <- add_dataloader_dependency(widget = widget)
  }
  return(widget)
}
#' @title Add dataloader dependency
#'
#' @description Add the 'dataloader' dependency to an htmlwidget.
#' You can only manipulate the htmlwidget if you call the method 'plot' with an rAmChart.
#'
#' @param widget An htmlwidget
#'
#' @return Return the updated htmlwidget.
#'
#' @export
#'
add_dataloader_dependency <- function(widget)
{
  # Plugin name/files and the AmCharts version come from the package config.
  conf_list <- yaml::yaml.load_file(system.file("conf.yaml", package = "rAmCharts"))
  loader_conf <- conf_list$plugins$dataloader
  dataloader_dep <- htmltools::htmlDependency(
    name = loader_conf$name,
    version = conf_list$amcharts_version,
    src = system.file("htmlwidgets/lib/plugins/dataloader", package = "rAmCharts"),
    script = loader_conf$script
  )
  .add_dependency(widget = widget, dependency = dataloader_dep)
}
#' Add responsive feature
#'
#' Adds the 'responsive' plugin dependency when the chart defines a
#' 'responsive' property. The unused 'version' parameter was dropped for
#' consistency with the other .add_*_dependency helpers; all visible callers
#' pass arguments by name.
#' @noRd
.add_responsive_dependency <- function(widget, data)
{
  cond <- exists("chartData", where = data) && exists("responsive", where = data$chartData)
  if (cond) widget <- add_responsive_dependency(widget)
  return(widget)
}
#' @title Add responsive dependency
#'
#' @description Add the 'responsive' dependency to an htmlwidget.
#' You can only manipulate the htmlwidget if you call the method 'plot' with an rAmChart.
#'
#' @param widget An htmlwidget.
#'
#' @return Return an updated htmlwidget with the dependency.
#'
#' @export
#'
add_responsive_dependency <- function(widget)
{
  # Plugin name/files and the AmCharts version come from the package config.
  conf_list <- yaml::yaml.load_file(system.file("conf.yaml", package = "rAmCharts"))
  responsive_conf <- conf_list$plugins$responsive
  responsive_dep <- htmltools::htmlDependency(
    name = responsive_conf$name,
    version = conf_list$amcharts_version,
    src = system.file("htmlwidgets/lib/plugins/responsive", package = "rAmCharts"),
    script = responsive_conf$script
  )
  .add_dependency(widget = widget, dependency = responsive_dep)
}
#' @title Add language
#'
#' @description Add the javascript file associated to the language if necessary
#'
#' @param widget An htmlwidget.
#' @param data The associated data list.
#'
#' @return Return an updated htmlwidget with the dependency.
#'
#' @noRd
#' @export
#'
.add_language_dependency <- function(widget, data)
{
  # NOTE(review): '@noRd' combined with '@export' above is contradictory (the
  # function would be exported without documentation); the sibling
  # .add_*_dependency helpers carry '@noRd' only -- confirm whether '@export'
  # is intentional.
  # Load the configuration yaml file into list
  conf_list <- yaml::yaml.load_file(system.file("conf.yaml", package = "rAmCharts"))
  language <- data$chartData$language
  if (length(language) > 0) {
    # Two script files are attached, both named '<language>.js': the general
    # AmCharts translations and the ones used by the export plugin.
    language_dep_general <- htmltools::htmlDependency(name = "general_language",
                                                      version = conf_list$amcharts_version,
                                                      src = system.file("htmlwidgets/lib/lang",
                                                                        package = "rAmCharts"),
                                                      script = paste0(language, ".js"))
    language_dep_export <- htmltools::htmlDependency(name = "export_language",
                                                     version = conf_list$amcharts_version,
                                                     src = system.file("htmlwidgets/lib/plugins/export/lang",
                                                                       package = "rAmCharts"),
                                                     script = paste0(language, ".js"))
    widget <- .add_dependency(widget = widget, dependency = language_dep_general)
    widget <- .add_dependency(widget = widget, dependency = language_dep_export)
  } else {
    # no need to add the dependency
  }
  return(widget)
}
#' Extract and remove the 'listeners' entry of a single chart sub-object
#' @param chart \code{list} of chart properties.
#' @param obj \code{character} naming the sub-object (e.g. "legend").
#' @return A list with the updated 'chart' (listeners removed) and the
#' extracted 'listeners' (NULL when the sub-object has none).
#' @noRd
substituteListener <- function(chart, obj)
{
  listeners <- NULL
  if (exists(obj, where = chart) &&
      exists("listeners", where = chart[[obj]])) {
    sub_obj <- chart[[obj]]
    listeners <- sub_obj[["listeners"]]
    sub_obj["listeners"] <- NULL
    chart[[obj]] <- sub_obj
  }
  return(list(chart = chart, listeners = listeners))
}
#' Substitute listeners from a multiple chart object
#'
#' Extracts the 'listeners' entry of every element of \code{chart[[obj]]}
#' and removes it from the chart properties.
#' @param chart \code{list} of chart properties.
#' @param obj \code{character} naming the object (e.g. "valueAxes").
#' @return A list with the updated \code{chart}, the extracted
#' \code{listeners_ls} (NULL when no element has listeners) and the
#' \code{indices} of the elements that had listeners (wrapped in a list when
#' there is exactly one, so the JavaScript side always receives an array).
#' @examples
#' x <- list(valueAxes = list(list(title = "tata"),
#'                            list(title = "titi"),
#'                            list(title = "tata", listeners = "tocnzj")))
#'
#' substituteMultiListeners(x, "valueAxes")
#'
#' #---
#' x <- list(valueAxes = list(list(title = "tata"),
#'                            list(title = "titi"),
#'                            list(title = "tata")))
#'
#' substituteMultiListeners(x, "valueAxes")
#' @noRd
substituteMultiListeners <- function(chart, obj)
{
  indices <- NULL
  listeners_ls <- NULL
  if (exists(obj, where = chart)) {
    # which element has listener(s) ?
    # vapply (instead of lapply + unlist + which) also handles an empty
    # sub-list: the previous which(unlist(list())) == which(NULL) errored.
    has_listeners <- vapply(chart[[obj]],
                            function(x) "listeners" %in% names(x),
                            logical(1))
    indices <- which(has_listeners)
    if (length(indices) > 0) {
      # Plain loop instead of lapply + '<<-': extract the listeners and
      # strip them from the chart properties in one pass.
      listeners_ls <- vector("list", length(indices))
      for (k in seq_along(indices)) {
        i <- indices[[k]]
        element <- chart[[obj]][[i]]
        listeners_ls[[k]] <- element[["listeners"]]
        element["listeners"] <- NULL
        chart[[obj]][[i]] <- element
      }
      # reformat data for JavaScript: a single index must stay an array
      if (length(indices) == 1)
        indices <- list(indices)
    }
  }
  return(list(chart = chart, listeners_ls = listeners_ls, indices = indices))
}
#' @title Append an htmlDependency to a widget's dependency list
#' @param widget An htmlwidget.
#' @param dependency An htmlDependency.
#' @return The widget with the given dependency appended.
#' @noRd
#'
.add_dependency <- function (widget, dependency)
{
  deps <- widget$dependencies
  if (length(deps) == 0) {
    deps <- list()
  }
  deps[[length(deps) + 1]] <- dependency
  widget$dependencies <- deps
  return(widget)
}
| /R/union_AmCharts.R | no_license | msabr027/rAmCharts | R | false | false | 20,954 | r | #' @include class_AmChart.R class_AmStockChart.R
NULL
setClassUnion(name = "AmCharts", members = c("AmChart", "AmStockChart"))
#' @title Setters for AmChart and AmStockChart.
#' @description These methods can be used both for AmChart and AmStockChart.
#' There are general for some first-level properties.
#' @param .Object \linkS4class{AmChart} or \linkS4class{AmStockChart}.
#' @param enabled \code{logical}, TRUE to display the export button.
#' @param ... Other properties that can be used depending on the setter.
#' @rdname amcharts-setters
#' @export
#'
setGeneric(name = "setExport", def = function(.Object, enabled = TRUE, ...) {standardGeneric("setExport")})
#' @examples
#' \donttest{
#' # Dummy examples
#' setExport(amPlot(1:10))
#' setExport(amStockChart())
#' }
#' @rdname amcharts-setters
#'
setMethod(f = "setExport", signature = c("AmCharts", "logicalOrMissing"),
definition = function(.Object, enabled = TRUE, ...)
{
.Object <- setProperties( .Object, export = list(enabled = enabled, ...) )
validObject(.Object)
return(.Object)
})
#' @rdname amcharts-setters
#' @export
#'
setGeneric(name = "setResponsive", def = function(.Object, enabled = TRUE, ...) {standardGeneric("setResponsive")})
#' @examples
#' \donttest{
#' # Dummy examples
#' setResponsive(amSerialChart())
#' setResponsive(amStockChart())
#' }
#' @rdname amcharts-setters
setMethod(f = "setResponsive", signature = c("AmCharts", "logicalOrMissing"),
definition = function(.Object, enabled = TRUE, ...)
{
.Object <- setProperties(.Object = .Object, responsive = list(enabled = enabled, ...))
validObject(.Object)
return(.Object)
})
#' Test whether a chart can be plotted (or printed)
#'
#' If the chart has a non-empty `type` slot it is rendered as an htmlwidget
#' (via `plot`); otherwise the object itself is printed to the console.
#' Inside a knitr document the widget is handed to `knitr::knit_print` so it
#' is embedded in the rendered output instead of being printed.
#' @noRd
#'
.plot_or_print <- function(object)
{
  # Explicit length comparison instead of relying on numeric truthiness.
  if (length(object@type) > 0) {
    chart_widget <- plot(object)
    if (isTRUE(getOption("knitr.in.progress"))) {
      knitr::knit_print(chart_widget)
    } else {
      print(chart_widget)
    }
  } else {
    print(object)
  }
}
#' @title Visualize AmChart with show
#' @description Display the object in the console.
#' @param object \linkS4class{AmChart}.
#' @return If the object has a valid type, it will plot the chart.
#' If not the method will trigger the method 'print'.
#'
setMethod(f = "show", signature = "AmChart", definition = .plot_or_print)
#' @title Visualize AmStockChart with show
#' @description Display the object in the console.
#' @param object \linkS4class{AmStockChart}.
#' @return If the object has a valid type, it will plot the chart.
#' If not the method will trigger the method 'print'.
#'
setMethod(f = "show", signature = "AmStockChart", definition = .plot_or_print)
#' @title PLOTTING METHOD
#' @description Basic method to plot an AmChart
#' @details Plots an object of class \code{\linkS4class{AmChart}}
#' @param x \linkS4class{AmChart}
#' @param y unused.
#' @param width \code{character}.
#' @param height \code{character}.B
#' @param background \code{character}.
#' @param ... Other properties.
#' @rdname plot.AmChart
#' @import htmlwidgets
#' @import htmltools
#' @export
setMethod(f = "plot", signature = "AmCharts",
definition = function(x, y, width = "100%", height = NULL,
background = "#ffffff", ...)
{
chart_ls <- listProperties(x)
# remove temporary parameter
chart_ls["RType_"] <- NULL
theme <- chart_ls$theme
if (length(theme)) {
background <- switch(theme,
"light" = "#ffffff",
"patterns" = "#ffffff",
"default" = "#ffffff",
"dark" = "#3f3f4f",
"chalk" = "#282828",
stop("[plot]: invalid theme"))
} else {}
# set background
if (exists("backgroundColor", where = chart_ls)) {
background <- chart_ls$backgroundColor
chart_ls["backgroundColor"] <- NULL
} else {}
# listeners on chart
if (exists("listeners", where = chart_ls)) {
listeners <- chart_ls$listeners
chart_ls[grep(x = names(chart_ls), pattern = "^listeners")] <- NULL
} else {
listeners <- NULL
}
# listeners on axes (GaugeAxis class)
ls_temp <- substituteMultiListeners(chart_ls, "axes")
chart_ls <- ls_temp$chart
axes_listeners <- ls_temp$listeners_ls
axes_listenersIndices <- ls_temp$indices
# listeners on categoryAxis
ls_temp <- substituteListener(chart_ls, "categoryAxis")
chart_ls <- ls_temp$chart
categoryAxis_listeners <- ls_temp$listeners
# listeners on chartCursor
ls_temp <- substituteListener(chart_ls, "chartCursor")
chart_ls <- ls_temp$chart
chartCursor_listeners <- ls_temp$listeners
# listeners on dataSetSelector
ls_temp <- substituteListener(chart_ls, "dataSetSelector")
chart_ls <- ls_temp$chart
dataSetSelector_listeners <- ls_temp$listeners
# listeners on legend
ls_temp <- substituteListener(chart_ls, "legend")
chart_ls <- ls_temp$chart
legend_listeners <- ls_temp$listeners
# listeners on panels
ls_temp <- substituteMultiListeners(chart_ls, "panels")
chart_ls <- ls_temp$chart
panels_listeners <- ls_temp$listeners_ls
panels_listenersIndices <- ls_temp$indices
# listeners on periodSelector
ls_temp <- substituteListener(chart_ls, "periodSelector")
chart_ls <- ls_temp$chart
periodSelector_listeners <- ls_temp$listeners
# listeners on valueAxis
ls_temp <- substituteMultiListeners(chart_ls, "valueAxes")
chart_ls <- ls_temp$chart
valueAxes_listeners <- ls_temp$listeners_ls
valueAxes_listenersIndices <- ls_temp$indices
# group (Stock synchronisation)
if (exists("group", where = chart_ls)) {
group <- chart_ls$group
chart_ls[grep(x = names(chart_ls), pattern = "^group")] <- NULL
if(group == ""){
group <- NULL
}
} else {
group <- NULL
}
# case for drilldown chart
if (exists("subChartProperties", where = chart_ls)) {
jsFile <- "amDrillChart"
chart_ls["subChartProperties"] <- NULL
data <- list(main = chart_ls,
subProperties = x@subChartProperties,
background = background)
} else {
jsFile <- "ramcharts_base"
data <- list(chartData = chart_ls,
background = background,
# listeners on chart
listeners = listeners,
#listeners on properties
axes_listeners = axes_listeners,
axes_listenersIndices = axes_listenersIndices,
categoryAxis_listeners = categoryAxis_listeners,
chartCursor_listeners = chartCursor_listeners,
dataSetSelector_listeners = dataSetSelector_listeners,
legend_listeners = legend_listeners,
panels_listeners = panels_listeners,
panels_listenersIndices = panels_listenersIndices,
periodSelector_listeners = periodSelector_listeners,
valueAxes_listeners = valueAxes_listeners,
valueAxes_listenersIndices = valueAxes_listenersIndices,
group = group)
}
# Create initial widget
widget <- htmlwidgets::createWidget(name = eval(jsFile),
x = data,
width = width,
height = height,
package = 'rAmCharts')
# Add dependencies if necessary
widget <- .add_type_dependency(widget = widget, data = data, type = x@type)
widget <- .add_export_dependency(widget = widget, data = data)
widget <- .add_theme_dependency(widget = widget, data = data)
widget <- .add_dataloader_dependency(widget = widget, data = data)
widget <- .add_responsive_dependency(widget = widget, data = data)
widget <- .add_language_dependency(widget = widget, data = data)
return(widget)
})
#' Add dependency for chart type
#' @import yaml
#' @noRd
#'
.add_type_dependency <- function(widget,
data,
type = c("funnel", "gantt", "gauge", "pie",
"radar", "serial", "stock", "xy"))
{
type <- match.arg(type)
if (type == "stock") type <- "amstock" # modification temporaire
file_js <- paste0(type, ".js")
# For some type, we need to source also 'serial.js'
if (type %in% c("gantt", "amstock")) file_js <- c("serial.js", file_js)
# Load the configuration yaml file into list
conf_list <- yaml::yaml.load_file(system.file("conf.yaml", package = "rAmCharts"))
# Add main js dependency
type_dep <- htmltools::htmlDependency(name = paste0("amcharts_type_", type),
# name = paste0("amcharts_type", type),
version = conf_list$amcharts_version,
src = c(file = system.file("htmlwidgets/lib", package = "rAmCharts")),
script = file_js)
widget <- .add_dependency(widget = widget, dependency = type_dep)
# Add stylesheet if necessary
if (type == "amstock") {
style_dep <- htmltools::htmlDependency(name = conf_list$styles$amstockcharts$name,
version = conf_list$amcharts_version,
src = c(file = system.file("htmlwidgets/lib", package = "rAmCharts")),
stylesheet = conf_list$styles$amstockcharts$script)
widget <- .add_dependency(widget = widget, dependency = style_dep)
} else {
# No stylesheet needed
}
return (widget)
}
#' Add dependency for export
#' @noRd
.add_export_dependency <- function(widget, data)
{
cond <- exists("chartData", where = data) &&
exists("export", where = data$chartData) &&
data$chartData$export$enabled
if (cond) widget <- add_export_dependency(widget)
return (widget)
}
#' @title Add the export dependency to an htmlwidget
#'
#' @description Add the 'export' dependency to an htmlwidget.
#' You can only manipulate the htmlwidget if you call the method 'plot' with an rAmChart.
#'
#' @param widget An htmlwidget.
#'
#' @return Return the updated widget with the 'export' dependency.
#'
#' @export
#'
add_export_dependency <- function (widget)
{
# Load the configuration yaml file into list
conf_list <- yaml::yaml.load_file(system.file("conf.yaml", package = "rAmCharts"))
export_dep <- htmltools::htmlDependency(name = conf_list$plugins$export$name,
version = conf_list$amcharts_version,
src = system.file("htmlwidgets/lib/plugins/export", package = "rAmCharts"),
stylesheet = conf_list$plugins$export$stylesheet,
script = conf_list$plugins$export$script)
widget <- .add_dependency(widget = widget, dependency = export_dep)
return (widget)
}
#' Add theme
#' @noRd
.add_theme_dependency <- function(widget, data)
{
cond <- exists("chartData", where = data) &&
exists("theme", where = data$chartData) &&
length(data$chartData$theme) &&
(data$chartData$theme != "default")
if (cond) {
theme_js <- switch(data$chartData$theme,
"light" = "light.js",
"patterns" = "patterns.js",
"dark" = "dark.js",
"chalk" = "chalk.js",
stop("[plot]: invalid theme"))
widget <- add_theme_dependency(widget = widget, theme_js = theme_js)
} else {
# Nothing to do, the condition is FALSE
}
return (widget)
}
#' @title Add theme dependency
#'
#' @description Add the 'theme' dependency to an htmlwidget.
#' You can only manipulate the htmlwidget if you call the method 'plot' with an rAmChart.
#'
#' @param widget An htmlwidget.
#' @param theme_js A character indicating the JS file dependency.
#'
#' @return Return the updated htmlwidget.
#'
#' @examples
#' \donttest{
#' library(pipeR)
#' amPlot(1:10, theme = "dark") %>>% plot() %>>% add_theme_dependency("light.js")
#' }
#'
#' @export
#'
add_theme_dependency <- function (widget, theme_js = c("light.js", "patterns.js", "dark.js", "chalk.js"))
{
# Load the configuration yaml file into list
conf_list <- yaml::yaml.load_file(system.file("conf.yaml", package = "rAmCharts"))
theme_dep <- htmltools::htmlDependency(name = paste0("amcharts_themes_", theme_js),
version = conf_list$amcharts_version,
src = system.file("htmlwidgets/lib/themes", package = "rAmCharts"),
script = theme_js)
widget <- .add_dependency(widget = widget, dependency = theme_dep)
return (widget)
}
#' Add dataloader feature
#' @noRd
.add_dataloader_dependency <- function(widget, data)
{
cond1 <- exists("chartData", where = data) &&
exists("dataLoader", where = data$chartData)
cond2 <- exists("chartData", where = data) &&
any(sapply(X = data$chartData$dataSets, FUN = exists, x = "dataLoader"))
if (cond1 || cond2) widget <- add_dataloader_dependency(widget = widget)
return(widget)
}
#' @title Add dataloader dependency
#'
#' @description Add the 'dataloader' dependency to an htmlwidget.
#' You can only manipulate the htmlwidget if you call the method 'plot' with an rAmChart.
#'
#' @param widget An htmlwidget
#'
#' @return Return the updated htmlwidget.
#'
#' @export
#'
add_dataloader_dependency <- function(widget)
{
# Load the configuration yaml file into list
conf_list <- yaml::yaml.load_file(system.file("conf.yaml", package = "rAmCharts"))
dataloader_dep <- htmltools::htmlDependency(name = conf_list$plugins$dataloader$name,
version = conf_list$amcharts_version,
src = system.file("htmlwidgets/lib/plugins/dataloader", package = "rAmCharts"),
script = conf_list$plugins$dataloader$script)
widget <- .add_dependency(widget = widget, dependency = dataloader_dep)
return(widget)
}
#' Add responsive feature
#'
#' Adds the 'responsive' plugin dependency when the chart defines a
#' 'responsive' property. The unused 'version' parameter was dropped for
#' consistency with the other .add_*_dependency helpers; all visible callers
#' pass arguments by name.
#' @noRd
.add_responsive_dependency <- function(widget, data)
{
  cond <- exists("chartData", where = data) && exists("responsive", where = data$chartData)
  if (cond) widget <- add_responsive_dependency(widget)
  return(widget)
}
#' @title Add responsive dependency
#'
#' @description Add the 'responsive' dependency to an htmlwidget.
#' You can only manipulate the htmlwidget if you call the method 'plot' with an rAmChart.
#'
#' @param widget An htmlwidget.
#'
#' @return Return an updated htmlwidget with the dependency.
#'
#' @export
#'
add_responsive_dependency <- function(widget)
{
# Load the configuration yaml file into list
conf_list <- yaml::yaml.load_file(system.file("conf.yaml", package = "rAmCharts"))
responsive_dep <- htmltools::htmlDependency(name = conf_list$plugins$responsive$name,
version = conf_list$amcharts_version,
src = system.file("htmlwidgets/lib/plugins/responsive", package = "rAmCharts"),
script = conf_list$plugins$responsive$script)
widget <- .add_dependency(widget = widget, dependency = responsive_dep)
return(widget)
}
#' @title Add language
#'
#' @description Attach the JavaScript translation files (core library and
#' export plugin) matching the chart language, when one is set in the data.
#'
#' @param widget An htmlwidget.
#' @param data The associated data list.
#'
#' @return The updated htmlwidget (unchanged when no language is set).
#'
#' @noRd
#' @export
#'
.add_language_dependency <- function(widget, data)
{
  # Load the configuration yaml file into a list.
  cfg <- yaml::yaml.load_file(system.file("conf.yaml", package = "rAmCharts"))
  language <- data$chartData$language
  if (length(language) == 0) {
    # No language requested: nothing to attach.
    return(widget)
  }
  lang_script <- paste0(language, ".js")
  general_dep <- htmltools::htmlDependency(
    name = "general_language",
    version = cfg$amcharts_version,
    src = system.file("htmlwidgets/lib/lang", package = "rAmCharts"),
    script = lang_script
  )
  export_dep <- htmltools::htmlDependency(
    name = "export_language",
    version = cfg$amcharts_version,
    src = system.file("htmlwidgets/lib/plugins/export/lang", package = "rAmCharts"),
    script = lang_script
  )
  widget <- .add_dependency(widget = widget, dependency = general_dep)
  .add_dependency(widget = widget, dependency = export_dep)
}
#' Substitute listeners from a single chart object
#' @param chart \code{list} of chart properties.
#' @param obj \code{character} naming the object.
#' @return \code{list} with the stripped \code{chart} and the extracted
#'   \code{listeners} (\code{NULL} when \code{obj} is absent or has none).
#' @noRd
substituteListener <- function(chart, obj)
{
  listeners <- NULL
  if (exists(obj, where = chart) &&
      exists("listeners", where = chart[[obj]])) {
    component <- chart[[obj]]
    listeners <- component[["listeners"]]
    # Drop the listeners entry from the component before writing it back.
    component["listeners"] <- NULL
    chart[[obj]] <- component
  }
  list(chart = chart, listeners = listeners)
}
#' Substitute listeners from a multiple chart object
#'
#' Extracts the \code{listeners} entries from every element of
#' \code{chart[[obj]]} and strips them from the chart list.
#'
#' @param chart \code{list} of chart properties.
#' @param obj \code{character} naming the object.
#' @return \code{list} with the stripped \code{chart}, the extracted
#'   \code{listeners_ls} (\code{NULL} when none), and the \code{indices} of
#'   the elements that carried listeners (wrapped in a \code{list} when there
#'   is exactly one, so the JavaScript side always receives an array).
#' @examples
#' x <- list(valueAxes = list(list(title = "tata"),
#'                            list(title = "titi"),
#'                            list(title = "tata", listeners = "tocnzj")))
#'
#' substituteMultiListeners(x, "valueAxes")
#'
#' #---
#' x <- list(valueAxes = list(list(title = "tata"),
#'                            list(title = "titi"),
#'                            list(title = "tata")))
#'
#' substituteMultiListeners(x, "valueAxes")
#' @noRd
substituteMultiListeners <- function(chart, obj)
{
  indices <- NULL
  listeners_ls <- NULL
  if (exists(obj, where = chart)) {
    # Which elements have listener(s)?  vapply is type-stable, unlike the
    # previous lapply() + unlist() pair.
    has_listeners <- vapply(chart[[obj]],
                            function(x) "listeners" %in% names(x),
                            logical(1))
    indices <- which(has_listeners)
    if (length(indices)) {
      # Collect the listeners first ...
      listeners_ls <- lapply(indices, function(i) chart[[obj]][[i]][["listeners"]])
      # ... then strip them with a plain loop.  The original used `<<-`
      # inside lapply() to mutate `chart` in the enclosing frame, a fragile
      # non-local side effect.
      for (i in indices) {
        element <- chart[[obj]][[i]]
        element["listeners"] <- NULL
        chart[[obj]][[i]] <- element
      }
      # Reformat data for JavaScript: a single index must stay wrapped.
      if (length(indices) == 1)
        indices <- list(indices)
    }
  }
  list(chart = chart, listeners_ls = listeners_ls, indices = indices)
}
#' @title Add any dependency to an htmlwidget
#' @param widget An htmlwidget.
#' @param dependency An htmlDependency.
#' @return The widget with the given dependency appended to its
#'   \code{dependencies} list.
#' @noRd
#'
.add_dependency <- function (widget, dependency)
{
  deps <- widget$dependencies
  # First dependency ever: start from an empty list.
  if (length(deps) == 0) deps <- list()
  deps[[length(deps) + 1]] <- dependency
  widget$dependencies <- deps
  widget
}
|
#' @title Push local files to a remote server
#'
#' @description \code{push} makes a remote copy of a new or updated local file(s)
#'
#' @param path Character string for path to be pushed, or path containing the
#'   file to be pushed. If \code{path2} is specified, \code{path} refers only
#'   to the local source path.
#' @param remoteName Character string for the remote server.
#' @param fileName Character string of file within \code{path} to be pushed.
#'   If \code{""}, all files in \code{path} are pushed.
#' @param verbose logical. Print the rsync command and its output.
#' @param path2 Remote destination path; if missing, \code{path2} is the
#'   remote equivalent of \code{path}.
#'
#' @details
#' Works via ssh and rsync. Can push all files in a folder, or a specific file
#' in the specified path. Only files that are newer locally (or only present
#' locally) are altered remotely; nothing is deleted remotely, a newer remote
#' file is never overwritten, and local files are never changed. Local and
#' remote paths are assumed identical unless \code{path2} is specified.
#'
#' @seealso \code{\link{pull}} for the opposite process, \code{\link{run}} to
#'   run a script remotely, and \code{\link{prp}} to push run pull. See
#'   \code{\link{rbLib-package}} for overview.
#'
#' @examples
#' \dontrun{
#' path <- "./Documents/School&Work/NCEAS_UnderIce/core/scripts/analysis/"
#' scriptName <- "reset.sim.R"
#' remoteName <- "ryanb@@amphiprion.deenr.rutgers.edu"
#' (push(path, remoteName, fileName=scriptName))
#' }
#'
#' @export
push <- function(path, remoteName, fileName="", verbose=FALSE, path2){
  # Default the remote destination to the local path; '&' must be escaped so
  # the remote shell does not treat it as a control operator.
  if (missing(path2)) {
    path2 <- gsub("&", "\\\\&", path, perl = TRUE)
  }
  # rsync flags: archive, compress, update-only (skip files newer on the
  # receiver), keep partials/progress, print transfer statistics.
  push.sh <- paste0(
    "rsync -azuP --stats ",
    "\"", path, fileName, "\" ",
    remoteName, ":'", path2, "'"
  )
  if (verbose) {
    cat(push.sh, "\n")
    system(push.sh)
  } else {
    invisible(system(push.sh))
  }
}
#' Run a remote script remotely
#'
#' @description \code{run} runs an R script remotely
#'
#' @param scriptName Character string indicating the name of the script to be run
#' @param path Character string for path to script. Must end with a "/"
#' @param remoteName Character string for remote server in username@@place.edu
#'   format. See \code{run.remote} in the package \code{ssh.utils}
#' @param verbose logical. print extra output.
#' @param debugMode logical. whether the run should load a file called '.RData'
#'   present in the directory, and if it should save a file (image) by the
#'   same name on exit.
#'
#' @seealso \code{\link{pull}} for pulling in remote files, \code{\link{push}}
#'   to push a local file to the remote server, and \code{\link{prp}} to push
#'   run pull. See \code{\link{rbLib-package}} for overview.
#'
#' @examples
#' \dontrun{
#' path <- "./Documents/School&Work/NCEAS_UnderIce/core/scripts/analysis/"
#' scriptName <- "reset.sim.R"
#' remoteName <- "ryanb@@amphiprion.deenr.rutgers.edu"
#' (run(scriptName, path, remoteName))
#' }
#'
#' @export
run <- function(scriptName, path, remoteName, verbose=FALSE, debugMode=FALSE){
  # The original call discarded requireNamespace()'s result, so a missing
  # dependency only surfaced later as an obscure error at the '::' call.
  # Fail fast with a clear message instead.
  if (!requireNamespace("ssh.utils", quietly = TRUE)) {
    stop("package 'ssh.utils' is required by run()", call. = FALSE)
  }
  # cd into the script's directory, then launch R in batch mode, detached
  # (nohup ... &) so the ssh session returns immediately.
  rr.cmd.cd <- paste0("cd \'", path, "\'")
  if (debugMode) {
    # Debug mode keeps R's default .RData load/save behavior.
    rr.cmd <- paste0("nohup R CMD BATCH ", scriptName, " &")
  } else {
    rr.cmd <- paste0("nohup R CMD BATCH --vanilla --no-save ", scriptName, " &")
  }
  blah <- ssh.utils::run.remote(paste0(rr.cmd.cd, ";", rr.cmd), remote = remoteName)
  if (verbose) {
    blah
  } else {
    invisible(blah)
  }
}
#' Pull remote files to local system
#'
#' @description \code{pull} makes a local copy of a new or updated remote file(s)
#'
#' @param path Character string for path to be pulled, or path containing file
#'   to be pulled. If \code{path2} is specified, \code{path} refers only to
#'   the local destination.
#' @param remoteName Character string for remote server in username@@place.edu
#'   format. See \code{run.remote} in the package \code{ssh.utils}
#' @param fileName Character string of file within \code{path} to be pulled.
#'   If \code{""}, all files in \code{path} are pulled.
#' @param verbose logical. print extra output (the rsync command plus a report
#'   of which local files changed).
#' @param path2 remote source path; if blank, \code{path2} is the remote
#'   equivalent of \code{path}
#'
#' @seealso \code{\link{push}} for the opposite process, \code{\link{run}} to
#'   run script remotely, and \code{\link{prp}} to push run pull. See
#'   \code{\link{rbLib-package}} for overview.
#'
#' @examples
#' \dontrun{
#' path <- "./Documents/School&Work/NCEAS_UnderIce/core/scripts/analysis/"
#' scriptName <- "reset.sim.R"
#' remoteName <- "ryanb@@amphiprion.deenr.rutgers.edu"
#' (pull(path, remoteName, fileName=""))
#' }
#'
#' @export
pull <- function(path, remoteName, fileName="", verbose=FALSE, path2){
  # from local, pull files updated by script
  if (missing(path2)) {
    path2 <- gsub("&", "\\\\&", path, perl = TRUE)
  }
  pull.sh <- paste0(
    "rsync -azuP --stats ",
    remoteName, ":'", path2, fileName, "' ",
    "\"", path, "\""
  )
  if (verbose) {
    # Snapshot before/after the transfer so changed files can be reported.
    # The original took the first snapshot unconditionally, scanning the
    # whole directory even when the result was never used (non-verbose).
    ss1 <- utils::fileSnapshot(path, full.names = TRUE)
    cat(pull.sh, "\n")
    system(pull.sh)
    ss2 <- utils::fileSnapshot(path, full.names = TRUE)
    utils::changedFiles(ss1, ss2)
  } else {
    invisible(system(pull.sh))
  }
}
#' Push, Run, then Pull
#'
#' @description \code{prp} pushes local updates to remote, runs a script
#'   remotely, then pulls remote updates to local.
#'
#' @param path Character string for path to be pulled, or path containing file
#'   to be pulled
#' @param scriptName Character string of the script to be run
#' @param remoteName Character string for remote server in username@@place.edu
#'   format. See \code{run.remote} in the package \code{ssh.utils}
#' @param verbose logical. print extra output.
#' @param debugMode logical. whether the run should load a file called '.RData'
#'   present in the directory, and if it should save a file (image) by the
#'   same name on exit.
#' @param path2 remote path; if blank, \code{path2} is the remote equivalent
#'   of \code{path}
#'
#' @seealso \code{\link{pull}} and \code{\link{push}} to sync files, and
#'   \code{\link{run}} to run script remotely. See \code{\link{rbLib-package}}
#'   for overview.
#'
#' @examples
#' \dontrun{
#' path <- "./Documents/School&Work/NCEAS_UnderIce/core/scripts/analysis/"
#' scriptName <- "reset.sim.R"
#' remoteName <- "ryanb@@amphiprion.deenr.rutgers.edu"
#' prp(path, scriptName, remoteName, verbose=TRUE)
#' }
#'
#' @export
prp <- function(path, scriptName, remoteName, verbose=FALSE, debugMode=FALSE, path2){
  # Escape '&' once here so push() and pull() receive the same remote path.
  if (missing(path2)) {
    path2 <- gsub("&", "\\\\&", path, perl = TRUE)
  }
  announce <- function(stage) {
    if (verbose) cat(stage, sep = "")
  }
  announce("pushing\n")
  push(path, remoteName, fileName = scriptName, verbose = verbose, path2 = path2)
  announce("running\n")
  run(scriptName, path = path, remoteName, verbose = verbose, debugMode = debugMode)
  announce("pulling\n")
  pull(path, remoteName, fileName = "", verbose = verbose, path2 = path2)
}
| /R/prp.R | no_license | rBatt/rbLib | R | false | false | 6,942 | r | #' @title Push local files to a remote server
#'
#' @description \code{push} makes a remote copy of a new or updated local file(s)
#'
#' @param path Character string for path to be pushed, or path containing file
#'   to be pushed. If \code{path2} is specified, \code{path} refers only to
#'   the local source path.
#' @param remoteName Character string for remote server.
#' @param fileName Character string of file within \code{path} to be pushed.
#'   If \code{""}, all files in \code{path} are pushed.
#' @param verbose logical. print extra output.
#' @param path2 remote destination path; if blank, \code{path2} is the remote
#'   equivalent of \code{path}
#'
#' @details
#' Works via ssh and rsync. Can push all files in a folder, or a specific file
#' in the specified path. Only files that are newer locally (or only present
#' locally) are altered remotely; nothing is deleted remotely, a newer remote
#' file is never overwritten, and local files are never changed.
#'
#' @seealso \code{\link{pull}} for the opposite process, \code{\link{run}} to
#'   run script remotely, and \code{\link{prp}} to push run pull. See
#'   \code{\link{rbLib-package}} for overview.
#'
#' @examples
#' \dontrun{
#' path <- "./Documents/School&Work/NCEAS_UnderIce/core/scripts/analysis/"
#' scriptName <- "reset.sim.R"
#' remoteName <- "ryanb@@amphiprion.deenr.rutgers.edu"
#' (push(path, remoteName, fileName=scriptName))
#' }
#'
#' @export
push <- function(path, remoteName, fileName="", verbose=FALSE, path2){
  # Remote path defaults to the local one; escape '&' for the remote shell.
  if (missing(path2)) path2 <- gsub("&", "\\\\&", path, perl = TRUE)
  # Assemble: rsync -azuP --stats "<path><file>" <remote>:'<path2>'
  src <- paste0("\"", path, fileName, "\" ")
  dst <- paste0(remoteName, ":'", path2, "'")
  push.sh <- paste0("rsync -azuP --stats ", src, dst)
  if (verbose) {
    cat(push.sh, "\n")
    system(push.sh)
  } else {
    invisible(system(push.sh))
  }
}
#' Run a remote script remotely
#'
#' @description \code{run} runs an R script remotely
#'
#' @param scriptName Character string indicating the name of the script to be run
#' @param path Character string for path to script. Must end with a "/"
#' @param remoteName Character string for remote server in username@@place.edu
#'   format. See \code{run.remote} in the package \code{ssh.utils}
#' @param verbose logical. print extra output.
#' @param debugMode logical. whether the run should load a file called '.RData'
#'   present in the directory, and if it should save a file (image) by the
#'   same name on exit.
#'
#' @seealso \code{\link{pull}} for pulling in remote files, \code{\link{push}}
#'   to push a local file to the remote server, and \code{\link{prp}} to push
#'   run pull. See \code{\link{rbLib-package}} for overview.
#'
#' @examples
#' \dontrun{
#' path <- "./Documents/School&Work/NCEAS_UnderIce/core/scripts/analysis/"
#' scriptName <- "reset.sim.R"
#' remoteName <- "ryanb@@amphiprion.deenr.rutgers.edu"
#' (run(scriptName, path, remoteName))
#' }
#'
#' @export
run <- function(scriptName, path, remoteName, verbose=FALSE, debugMode=FALSE){
  # requireNamespace() returns FALSE when the package is absent; the original
  # ignored that result, deferring the failure to an obscure later error.
  if (!requireNamespace("ssh.utils", quietly = TRUE)) {
    stop("package 'ssh.utils' is required by run()", call. = FALSE)
  }
  # Change into the script directory, then run R in detached batch mode.
  rr.cmd.cd <- paste0("cd \'", path, "\'")
  if (debugMode) {
    # Debug mode keeps R's default .RData load/save behavior.
    rr.cmd <- paste0("nohup R CMD BATCH ", scriptName, " &")
  } else {
    rr.cmd <- paste0("nohup R CMD BATCH --vanilla --no-save ", scriptName, " &")
  }
  blah <- ssh.utils::run.remote(paste0(rr.cmd.cd, ";", rr.cmd), remote = remoteName)
  if (verbose) {
    blah
  } else {
    invisible(blah)
  }
}
#' Pull remote files to local system
#'
#' @description \code{pull} makes a local copy of a new or updated remote file(s)
#'
#' @param path Character string for path to be pulled, or path containing file
#'   to be pulled. If \code{path2} is specified, \code{path} refers only to
#'   the local destination.
#' @param remoteName Character string for remote server in username@@place.edu
#'   format. See \code{run.remote} in the package \code{ssh.utils}
#' @param fileName Character string of file within \code{path} to be pulled.
#'   If \code{""}, all files in \code{path} are pulled.
#' @param verbose logical. print extra output (the rsync command plus a report
#'   of which local files changed).
#' @param path2 remote source path; if blank, \code{path2} is the remote
#'   equivalent of \code{path}
#'
#' @seealso \code{\link{push}} for the opposite process, \code{\link{run}} to
#'   run script remotely, and \code{\link{prp}} to push run pull. See
#'   \code{\link{rbLib-package}} for overview.
#'
#' @examples
#' \dontrun{
#' path <- "./Documents/School&Work/NCEAS_UnderIce/core/scripts/analysis/"
#' scriptName <- "reset.sim.R"
#' remoteName <- "ryanb@@amphiprion.deenr.rutgers.edu"
#' (pull(path, remoteName, fileName=""))
#' }
#'
#' @export
pull <- function(path, remoteName, fileName="", verbose=FALSE, path2){
  if (missing(path2)) {
    path2 <- gsub("&", "\\\\&", path, perl = TRUE)
  }
  pull.sh <- paste0(
    "rsync -azuP --stats ",
    remoteName, ":'", path2, fileName, "' ",
    "\"", path, "\""
  )
  if (verbose) {
    # Only the verbose branch reports changed files, so only it needs the
    # directory snapshots (the original scanned the directory regardless).
    ss1 <- utils::fileSnapshot(path, full.names = TRUE)
    cat(pull.sh, "\n")
    system(pull.sh)
    ss2 <- utils::fileSnapshot(path, full.names = TRUE)
    utils::changedFiles(ss1, ss2)
  } else {
    invisible(system(pull.sh))
  }
}
#' Push, Run, then Pull
#'
#' @description \code{prp} pushes local updates to remote, runs script
#'   remotely, then pulls remote updates to local.
#'
#' @param path Character string for path to be pulled, or path containing file
#'   to be pulled
#' @param scriptName Character string of the script to be run
#' @param remoteName Character string for remote server in username@@place.edu
#'   format. See \code{run.remote} in the package \code{ssh.utils}
#' @param verbose logical. print extra output.
#' @param debugMode logical. whether the run should load a file called '.RData'
#'   present in the directory, and if it should save a file (image) by the
#'   same name on exit.
#' @param path2 remote path; if blank, \code{path2} is the remote equivalent
#'   of \code{path}
#'
#' @seealso \code{\link{pull}} and \code{\link{push}} to sync files, and
#'   \code{\link{run}} to run script remotely. See \code{\link{rbLib-package}}
#'   for overview.
#'
#' @examples
#' \dontrun{
#' path <- "./Documents/School&Work/NCEAS_UnderIce/core/scripts/analysis/"
#' scriptName <- "reset.sim.R"
#' remoteName <- "ryanb@@amphiprion.deenr.rutgers.edu"
#' prp(path, scriptName, remoteName, verbose=TRUE)
#' }
#'
#' @export
prp <- function(path, scriptName, remoteName, verbose=FALSE, debugMode=FALSE, path2){
  # Resolve the remote path once so push() and pull() agree on it.
  if (missing(path2)) path2 <- gsub("&", "\\\\&", path, perl = TRUE)
  # Stage 1: sync the script up to the remote host.
  if (verbose) cat("pushing\n")
  push(path, remoteName, fileName = scriptName, verbose = verbose, path2 = path2)
  # Stage 2: execute it there in detached batch mode.
  if (verbose) cat("running\n")
  run(scriptName, path = path, remoteName, verbose = verbose, debugMode = debugMode)
  # Stage 3: bring back everything the script produced.
  if (verbose) cat("pulling\n")
  pull(path, remoteName, fileName = "", verbose = verbose, path2 = path2)
}
|
# S3 summary method for 'cnapath' objects (network path collections; each
# object carries a list of node paths in $path and path lengths in $dist).
# One or more objects may be supplied; the method tabulates path counts,
# path-length distributions and per-node degeneracy (how many paths visit
# each node), and can optionally plot the two distributions side by side.
#
# Arguments:
#   object, ... one or more 'cnapath' objects to summarize jointly.
#   pdb         optional PDB object used to relabel numeric node ids with
#               residue names/numbers (plus chain ids for multi-chain PDBs).
#   label, col  per-network labels and plot colors; default to 1..n.
#   plot        draw the two histograms?
#   concise     re-number nodes consecutively for a more compact plot.
#   cutoff      minimum (possibly normalized) degeneracy for a node to be
#               kept in the degeneracy table.
#   normalize   scale each network's node degeneracy by its maximum.
# Returns a list with components 'network', 'num.paths', 'hist' and
# 'degeneracy'.
summary.cnapath <- function(object, ..., pdb = NULL, label = NULL, col = NULL,
  plot = FALSE, concise = FALSE, cutoff = 0.1, normalize = TRUE) {
  pa <- list(object, ...)
  if(!all(sapply(pa, inherits, "cnapath")))
    stop("Input pa is not a 'cnapath' object")
  if(is.null(label)) label = 1:length(pa)
  if(is.null(col)) col = 1:length(pa)
  out <- list()
  # read node numbers on paths
  y <- lapply(pa, function(x) unlist(x$path))
  # store node degeneracy (visit count per node, per network)
  node.deg <- lapply(y, table)
  if(normalize) {
    node.deg <- lapply(node.deg, function(x) x/max(x))
  }
  # find on-path node by the cutoff (nodes passing it in any network)
  yy <- lapply(node.deg, function(x) x[x >= cutoff])
  onpath.node <- unique(names(unlist(yy)))
  # names are numeric node ids stored as character; order them numerically
  i <- as.numeric(onpath.node)
  onpath.node <- onpath.node[order(i)]
  # generate the node degeneracy table; nodes absent from a network count 0
  o <- lapply(node.deg, function(x) {
    x <- x[match(onpath.node, names(x))]
    x[is.na(x)] <- 0
    names(x) <- onpath.node
    x
  } )
  # replace node id with pdb resid and resno
  if(!is.null(pdb)) {
    ca.inds <- atom.select(pdb, elety="CA", verbose = FALSE)
    resno <- pdb$atom[ca.inds$atom, "resno"]
    resid <- pdb$atom[ca.inds$atom, "resid"]
    chain <- pdb$atom[ca.inds$atom, "chain"]
    lig.inds <- atom.select(pdb, "ligand", verbose = FALSE)
    # ligand residues keep the 3-letter code; others become 1-letter (aa321)
    islig <- paste(chain, resno, sep="_") %in%
      paste(pdb$atom[lig.inds$atom, "chain"],
        pdb$atom[lig.inds$atom, "resno"], sep="_")
    resid[!islig] <- aa321(resid[!islig])
    o <- lapply(o, function(x) {
      node <- as.numeric(names(x))
      # prefix the chain id only when the structure has several chains
      if(length(unique(pdb$atom[, "chain"])) > 1)
        n <- paste(chain[node], paste(resid[node], resno[node], sep=""), sep="_")
      else
        n <- paste(resid[node], resno[node], sep="")
      names(x) <- n
      x
    } )
  }
  names(o) <- label
  out$network <- label
  out$num.paths <- sapply(pa, function(x) length(x$path))
  # path-length distribution, binned into 5 equal-width intervals
  out$hist <- lapply(pa, function(x) table(cut(x$dist, breaks=5, include.lowest = TRUE)))
  if(length(out$hist)==1) out$hist = out$hist[[1]]
  out$degeneracy <- do.call(rbind, o)
  if(normalize) out$degeneracy <- round(out$degeneracy, digits=2)
  if(plot) {
    # restore all graphical parameters when this function exits
    opar <- par(no.readonly = TRUE)
    on.exit(par(opar))
    layout(matrix(1:2, nrow=1), respect = TRUE)
    # per-network colors as rgb fractions with a fixed alpha of 0.6
    rgbcolors <- sapply(col, col2rgb) / 255
    rgbcolors <- rbind(rgbcolors, alpha = 0.6)
    ##- for path length distribution
    y1 <- lapply(pa, function(x)
      hist(x$dist, breaks = 20, plot = FALSE) )
    par(mar=c(4, 4, 1, 1))
    plot(y1[[1]], freq = FALSE, col = do.call(rgb, as.list(rgbcolors[,1])),
      border = col[1], main = "Path Length Distribution",
      xlim = range(unlist(lapply(y1, "[[", "breaks"))),
      ylim = c(0, max(unlist(lapply(y1, "[[", "density")))),
      xlab = "Path length", ylab = "Probability density")
    if(length(y1) > 1)
      # overlay the remaining networks onto the first histogram
      for(i in 2:length(y1)) {
        plot(y1[[i]], freq = FALSE, col = do.call(rgb, as.list(rgbcolors[,i])),
          border = col[i], add = TRUE)
      }
    legend("topleft", legend = label, bty = "n", text.col = col)
    ##- for node degeneracy
    y2 <- lapply(pa, function(x) unlist(x$path))
    if(!is.null(pdb)) y2 <- lapply(y2, function(x) resno[x])
    if(concise) {
      # re-number node to get more concise plot
      ii <- sort(unique(unlist(y2)))
      y2 <- lapply(y2, match, ii)
    }
    # one unit-width histogram bin per node id
    y2 <- lapply(y2, function(x)
      hist(x, breaks = c(seq(min(x), max(x), 1) - 0.5, max(x) + 0.5),
        plot = FALSE) )
    par(mar=c(4, 4, 1, 1))
    plot(y2[[1]], freq = TRUE, col = do.call(rgb, as.list(rgbcolors[,1])),
      lty = 0, main = "Node Degeneracy",
      xlim = range(unlist(lapply(y2, "[[", "breaks"))),
      ylim = c(0, max(unlist(lapply(y2, "[[", "counts")))),
      xlab = "Node no", ylab = "Number of paths")
    if(length(y2) > 1)
      for(i in 2:length(y2))
        plot(y2[[i]], freq = TRUE, col = do.call(rgb, as.list(rgbcolors[,i])),
          lty = 0, add = TRUE)
  }
  return(out)
}
# S3 print method for 'cnapath' objects.  Accepts either a single 'cnapath'
# object or a plain list of them (the list names then become the network
# labels).  Statistics come from summary.cnapath(); unless plotting was
# requested via '...', the counts, path-length distribution and node
# degeneracy table are written to the console.
print.cnapath <- function(x, ...) {
  dots = list(...)
  if(is.list(x) && all(sapply(x, inherits, "cnapath"))) {
    # a bare list of cnapath objects: splice it into summary(obj1, obj2, ...),
    # defaulting the labels to the list names
    if(!"label" %in% names(dots) || is.null(dots$label)) dots$label = names(x)
    names(x) <- NULL
    args = c(x, dots)
    o <- do.call(summary, args)
  } else {
    o <- summary(x, ...)
  }
  # when a plot was requested, summary() already drew it; skip the text report
  if("plot" %in% names(dots)) plot = dots$plot
  else plot = FALSE
  if(!plot) {
    if("normalize" %in% names(dots)) normalize = dots$normalize
    else normalize = TRUE
    if(length(o$network) > 1) {
      cat("Number of networks: ", length(o$network), "(",
        paste(o$network, collapse=", "), ")\n")
    }
    cat("Number of paths in network(s):\n")
    if(length(o$network) > 1) {
      cat(paste(" ", o$network, ": ", o$num.paths, sep="", collapse="\n"), sep="\n")
      cat("\n")
    } else {
      cat(" ", o$num.paths, "\n\n")
    }
    cat("Path length distribution: \n")
    if(length(o$network) > 1) {
      for(i in 1:length(o$network)) {
        cat(" --- ", o$network[i], " ---")
        print(o$hist[[i]])
        cat("\n")
      }
    } else {
      print(o$hist)
      cat("\n")
    }
    cat("Node degeneracy table: \n\n")
    # a single network gets a blank row name instead of its label
    if(length(o$network) == 1) rownames(o$degeneracy) = ""
    if(normalize)
      print(format(o$degeneracy, nsmall=2), quote=FALSE)
    else
      print(o$degeneracy)
  }
}
# NOTE(review): this is a duplicated copy of summary.cnapath above; the first
# line below is fused with dataset metadata from the dump -- confirm intended.
| /bio3d/R/summary.cnapath.R | no_license | ingted/R-Examples | R | false | false | 5,690 | r | summary.cnapath <- function(object, ..., pdb = NULL, label = NULL, col = NULL,
  plot = FALSE, concise = FALSE, cutoff = 0.1, normalize = TRUE) {
  # Summarize one or more 'cnapath' objects: path counts, path-length
  # distribution, per-node degeneracy; optionally plot both distributions.
  pa <- list(object, ...)
  if(!all(sapply(pa, inherits, "cnapath")))
    stop("Input pa is not a 'cnapath' object")
  if(is.null(label)) label = 1:length(pa)
  if(is.null(col)) col = 1:length(pa)
  out <- list()
  # read node numbers on paths
  y <- lapply(pa, function(x) unlist(x$path))
  # store node degeneracy (visit count per node, per network)
  node.deg <- lapply(y, table)
  if(normalize) {
    node.deg <- lapply(node.deg, function(x) x/max(x))
  }
  # find on-path node by the cutoff
  yy <- lapply(node.deg, function(x) x[x >= cutoff])
  onpath.node <- unique(names(unlist(yy)))
  # order the character node ids numerically
  i <- as.numeric(onpath.node)
  onpath.node <- onpath.node[order(i)]
  # generate the node degeneracy table; absent nodes count as 0
  o <- lapply(node.deg, function(x) {
    x <- x[match(onpath.node, names(x))]
    x[is.na(x)] <- 0
    names(x) <- onpath.node
    x
  } )
  # replace node id with pdb resid and resno
  if(!is.null(pdb)) {
    ca.inds <- atom.select(pdb, elety="CA", verbose = FALSE)
    resno <- pdb$atom[ca.inds$atom, "resno"]
    resid <- pdb$atom[ca.inds$atom, "resid"]
    chain <- pdb$atom[ca.inds$atom, "chain"]
    lig.inds <- atom.select(pdb, "ligand", verbose = FALSE)
    islig <- paste(chain, resno, sep="_") %in%
      paste(pdb$atom[lig.inds$atom, "chain"],
        pdb$atom[lig.inds$atom, "resno"], sep="_")
    resid[!islig] <- aa321(resid[!islig])
    o <- lapply(o, function(x) {
      node <- as.numeric(names(x))
      # prefix the chain id only for multi-chain structures
      if(length(unique(pdb$atom[, "chain"])) > 1)
        n <- paste(chain[node], paste(resid[node], resno[node], sep=""), sep="_")
      else
        n <- paste(resid[node], resno[node], sep="")
      names(x) <- n
      x
    } )
  }
  names(o) <- label
  out$network <- label
  out$num.paths <- sapply(pa, function(x) length(x$path))
  out$hist <- lapply(pa, function(x) table(cut(x$dist, breaks=5, include.lowest = TRUE)))
  if(length(out$hist)==1) out$hist = out$hist[[1]]
  out$degeneracy <- do.call(rbind, o)
  if(normalize) out$degeneracy <- round(out$degeneracy, digits=2)
  if(plot) {
    # restore graphical parameters on exit
    opar <- par(no.readonly = TRUE)
    on.exit(par(opar))
    layout(matrix(1:2, nrow=1), respect = TRUE)
    rgbcolors <- sapply(col, col2rgb) / 255
    rgbcolors <- rbind(rgbcolors, alpha = 0.6)
    ##- for path length distribution
    y1 <- lapply(pa, function(x)
      hist(x$dist, breaks = 20, plot = FALSE) )
    par(mar=c(4, 4, 1, 1))
    plot(y1[[1]], freq = FALSE, col = do.call(rgb, as.list(rgbcolors[,1])),
      border = col[1], main = "Path Length Distribution",
      xlim = range(unlist(lapply(y1, "[[", "breaks"))),
      ylim = c(0, max(unlist(lapply(y1, "[[", "density")))),
      xlab = "Path length", ylab = "Probability density")
    if(length(y1) > 1)
      for(i in 2:length(y1)) {
        plot(y1[[i]], freq = FALSE, col = do.call(rgb, as.list(rgbcolors[,i])),
          border = col[i], add = TRUE)
      }
    legend("topleft", legend = label, bty = "n", text.col = col)
    ##- for node degeneracy
    y2 <- lapply(pa, function(x) unlist(x$path))
    if(!is.null(pdb)) y2 <- lapply(y2, function(x) resno[x])
    if(concise) {
      # re-number node to get more concise plot
      ii <- sort(unique(unlist(y2)))
      y2 <- lapply(y2, match, ii)
    }
    # one unit-width bin per node id
    y2 <- lapply(y2, function(x)
      hist(x, breaks = c(seq(min(x), max(x), 1) - 0.5, max(x) + 0.5),
        plot = FALSE) )
    par(mar=c(4, 4, 1, 1))
    plot(y2[[1]], freq = TRUE, col = do.call(rgb, as.list(rgbcolors[,1])),
      lty = 0, main = "Node Degeneracy",
      xlim = range(unlist(lapply(y2, "[[", "breaks"))),
      ylim = c(0, max(unlist(lapply(y2, "[[", "counts")))),
      xlab = "Node no", ylab = "Number of paths")
    if(length(y2) > 1)
      for(i in 2:length(y2))
        plot(y2[[i]], freq = TRUE, col = do.call(rgb, as.list(rgbcolors[,i])),
          lty = 0, add = TRUE)
  }
  return(out)
}
# NOTE(review): duplicated copy of print.cnapath above (dataset-dump
# duplication) -- confirm intended.
# Prints counts, path-length distribution and node degeneracy for one
# 'cnapath' object or a plain list of them, via summary.cnapath().
print.cnapath <- function(x, ...) {
  dots = list(...)
  if(is.list(x) && all(sapply(x, inherits, "cnapath"))) {
    # splice a bare list into summary(obj1, obj2, ...), labels from names
    if(!"label" %in% names(dots) || is.null(dots$label)) dots$label = names(x)
    names(x) <- NULL
    args = c(x, dots)
    o <- do.call(summary, args)
  } else {
    o <- summary(x, ...)
  }
  # if a plot was requested, summary() drew it; skip the textual report
  if("plot" %in% names(dots)) plot = dots$plot
  else plot = FALSE
  if(!plot) {
    if("normalize" %in% names(dots)) normalize = dots$normalize
    else normalize = TRUE
    if(length(o$network) > 1) {
      cat("Number of networks: ", length(o$network), "(",
        paste(o$network, collapse=", "), ")\n")
    }
    cat("Number of paths in network(s):\n")
    if(length(o$network) > 1) {
      cat(paste(" ", o$network, ": ", o$num.paths, sep="", collapse="\n"), sep="\n")
      cat("\n")
    } else {
      cat(" ", o$num.paths, "\n\n")
    }
    cat("Path length distribution: \n")
    if(length(o$network) > 1) {
      for(i in 1:length(o$network)) {
        cat(" --- ", o$network[i], " ---")
        print(o$hist[[i]])
        cat("\n")
      }
    } else {
      print(o$hist)
      cat("\n")
    }
    cat("Node degeneracy table: \n\n")
    # a single network gets a blank row name instead of its label
    if(length(o$network) == 1) rownames(o$degeneracy) = ""
    if(normalize)
      print(format(o$degeneracy, nsmall=2), quote=FALSE)
    else
      print(o$degeneracy)
  }
}
|
# Read the full household power data set; '?' encodes missing values.
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings ="?", stringsAsFactors = FALSE)
# Keep column 3 (Global_active_power) for the two target days only.
subdata <- data[(data$Date=="1/2/2007" | data$Date=="2/2/2007"),3]
# Open a 480x480 PNG graphics device.
png("plot1.png", width = 480, height = 480)
# Draw the red histogram of global active power.
hist(subdata, col = "red", xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
# Close the device to flush the image file.
dev.off() | /plot1.R | no_license | 867-5309/ExData_Plotting1 | R | false | false | 527 | r | #reading data from source (txt file)
# Load the raw consumption table ('?' encodes NA).
power <- read.table("household_power_consumption.txt", header = TRUE,
                    sep = ";", na.strings = "?", stringsAsFactors = FALSE)
# Restrict to 1-2 Feb 2007 and keep the third column (Global_active_power).
feb_days <- power$Date == "1/2/2007" | power$Date == "2/2/2007"
global_active <- power[feb_days, 3]
# Render the histogram straight into a 480x480 PNG file.
png("plot1.png", width = 480, height = 480)
hist(global_active, col = "red",
     xlab = "Global Active Power (kilowatts)",
     main = "Global Active Power")
dev.off()
# Simple library: load the R6 package.
library(R6) | /R/tests/file0010.R | no_license | Grandez/umlrcpp | R | false | false | 31 | r | # Libreria sencilla
library(R6) |
library(shiny)

# UI definition: two bin-count sliders ('bin1s', 'bin2s') in the sidebar and
# two plot outputs ('distPlot', 'distPlot2') in the main panel.
shinyUI(fluidPage(
  # Application title
  titlePanel("Hello Shiny!"),
  sidebarLayout(
    # Input controls: one bin-count slider per histogram.
    sidebarPanel(
      sliderInput("bin1s", "Number of bins:", min = 1, max = 50, value = 30),
      sliderInput("bin2s", "Number of bins:", min = 1, max = 30, value = 15)
    ),
    # Output area: the two generated distribution plots.
    mainPanel(
      plotOutput("distPlot"),
      plotOutput("distPlot2")
    )
  )
))
| /Week11/shinytrials_week11/UI.R | no_license | Mamasquee/LetsCode-Contents | R | false | false | 681 | r | library(shiny)
# Define the UI for an application drawing two histograms, each driven by its
# own bin-count slider ('bin1s' / 'bin2s' -> 'distPlot' / 'distPlot2').
shinyUI(
  fluidPage(
    # Application title
    titlePanel("Hello Shiny!"),
    sidebarLayout(
      sidebarPanel(
        # Bin count for the first histogram.
        sliderInput("bin1s", "Number of bins:",
                    min = 1, max = 50, value = 30),
        # Bin count for the second histogram.
        sliderInput("bin2s", "Number of bins:",
                    min = 1, max = 30, value = 15)
      ),
      # Show the two generated distribution plots.
      mainPanel(
        plotOutput("distPlot"),
        plotOutput("distPlot2")
      )
    )
  )
)
|
# Build an ODBC channel (RODBC) plus the SQL queries for the Washtenaw
# consumer address / guardian mailing pull, then run every query and keep
# the results as data.tables in sql$output.
sql <- list(
channel = odbcConnect("WSHSQLGP"),
query = list())
# consumer demographic: address, contact and CMH enrollment details for
# currently open Washtenaw consumers
# NOTE(review): the format string below only ever uses '%2$s'
# (input$end_date); input$start_date is passed but never substituted --
# confirm whether E2_Fn_CMH_Consumer_Guardian should receive the start date
# as its second argument.
sql$query$address <-
sprintf("select distinct
CMH.county, CMH.case_no,
ltrim(rtrim(address.AD_ADDR1)) as Address_line1,
ltrim(rtrim(address.AD_ADDR2)) as Address_line2,
ltrim(rtrim(C.CL_FNAME)) as Client_First,
ltrim(rtrim(C.CL_LNAME)) as client_last,
address.AD_CITY as City, address.AD_STATE as state,
left( ltrim(address.AD_ZIP), 5) as zipCode,
ltrim(rtrim(MailingAddress.AD_ADDR1)) as MailAddress_line1,
ltrim(rtrim(MailingAddress.AD_ADDR2)) as MailAddress_line2,
MailingAddress.AD_CITY as MailCity, MailingAddress.AD_STATE as Mailstate,
left( ltrim( MailingAddress.AD_ZIP), 5) as MailzipCode,
CMH.Team, CMH.MI, CMH.DD, Primary_Staff, CMH.CMH_EFFDT as CMH_Adm_date,
C.CL_HPHONE as home_Phone, C.CL_WPHONE as work_phone,
CMH.LivingArrangement,
Demo.CD_EFFDT as Last_demo_update,
Case when Guard.Case_No IS not null then 'Y' else null end as Guardian_in_sheet2
from encompass.dbo.ENCClient as ENC
join encompass.dbo.PCCClient as C on C.CL_RCDID = ENC.CL_RCDID
left join encompass.dbo.PCCClientDemographics Demo on Demo.CD_RCDID = C.CLF_CDEID
left join encompass.dbo.PCFCode ResidentialArrangement on
ResidentialArrangement.CO_RCDID = Demo.CDF_QIRESI
join encompass.dbo.tblE2_CMH_Open_Consumers CMH on CMH.Case_No = cast( C.CL_CASENO as Int)
left join encompass.dbo.PCFAddress address on address.AD_RCDID = C.CLF_ADRID
left join encompass.dbo.PCFAddress MailingAddress on Mailingaddress.AD_RCDID = ENC.CLF_ADRID
left join tblE2_Bio_Psych_Social Bio on Bio.county like
'Washtenaw' and Bio.case_no = CMH.case_no
left join encompass.dbo.E2_Fn_CMH_Consumer_Guardian ('Washtenaw', '%2$s', '%2$s')
as Guard on Guard.Case_No = CMH.Case_No
where CMH.county like 'Washtenaw'", input$start_date, input$end_date)
# guardian contacts: court/family/state guardianship types with the
# guardian's mailing address for each open Washtenaw consumer
# NOTE(review): this format string contains no conversion specifications,
# so both sprintf() date arguments are ignored here.
sql$query$guardian <-
sprintf("select distinct
CMH.county, CMH.case_no,
ltrim(rtrim(C.CL_FNAME)) as Client_First,
ltrim(rtrim(C.CL_LNAME)) as client_last,
cast(GuardianshipType.CO_NAME as varchar(32)) as GuardianshipType,
ltrim(rtrim(Contact.CC_FNAME)) as Guardian_First,
ltrim(rtrim(Contact.CC_LNAME)) as Guardian_Last,
ltrim(rtrim(Guardian_address.AD_ADDR1)) as MailAddress_line1,
ltrim(rtrim(Guardian_address.AD_ADDR2)) as MailAddress_line2,
Guardian_address.AD_CITY as MailCity,
Guardian_address.AD_STATE as Mailstate,
Guardian_address.AD_ZIP as MailzipCode
from encompass.dbo.ENCClient ENC
join encompass.dbo.PCCClient C on C.CL_RCDID = ENC.CL_RCDID
join encompass.dbo.tblE2_CMH_Open_Consumers CMH on
CMH.Case_No = cast( C.CL_CASENO as Int) and CMH.county like 'Washtenaw'
join encompass.dbo.ENCClientContact Contact on Contact.CCF_CLTID = ENC.CL_RCDID
and CC_OKTOUSE = 'Y' and CC_EXPDT is null
join encompass.dbo.PCFCode GuardianshipType on
Contact.CCF_CCOGTP = GuardianshipType.CO_RCDID
join encompass.dbo.PCFAddress as Guardian_address on
Contact.CCF_ADRID = Guardian_address.AD_RCDID and
Guardian_address.AD_EXPDT is null
where cast(GuardianshipType.CO_NAME as varchar(32)) in
('Public Guardian', 'Family Guardian', 'Temporary Wardship',
'Court-Appointed Guardian', 'Permanent State Wardship')",
input$start_date, input$end_date)
# query and collect all results -----------------------------------------------
# Each query is executed over the shared channel; because sqlQuery() returns
# data frames, sapply() cannot simplify and effectively yields a named list
# of data.tables keyed by query name.
sql$output <- sapply(
names(sql$query),
FUN = function(x) {
output <-
sqlQuery(query = get(x, with(sql, query)),
channel = sql$channel, stringsAsFactors = FALSE)
output <- data.table(output)
# assign(x, output, envir = sql)
return(output)
},
USE.NAMES = TRUE
) | /Misc/Addresses/Code/error/1_sql_error_address.R | no_license | JamesDalrymple/CMH | R | false | false | 3,638 | r | sql <- list(
channel = odbcConnect("WSHSQLGP"),
query = list())
# consumer demograhic
sql$query$address <-
sprintf("select distinct
CMH.county, CMH.case_no,
ltrim(rtrim(address.AD_ADDR1)) as Address_line1,
ltrim(rtrim(address.AD_ADDR2)) as Address_line2,
ltrim(rtrim(C.CL_FNAME)) as Client_First,
ltrim(rtrim(C.CL_LNAME)) as client_last,
address.AD_CITY as City, address.AD_STATE as state,
left( ltrim(address.AD_ZIP), 5) as zipCode,
ltrim(rtrim(MailingAddress.AD_ADDR1)) as MailAddress_line1,
ltrim(rtrim(MailingAddress.AD_ADDR2)) as MailAddress_line2,
MailingAddress.AD_CITY as MailCity, MailingAddress.AD_STATE as Mailstate,
left( ltrim( MailingAddress.AD_ZIP), 5) as MailzipCode,
CMH.Team, CMH.MI, CMH.DD, Primary_Staff, CMH.CMH_EFFDT as CMH_Adm_date,
C.CL_HPHONE as home_Phone, C.CL_WPHONE as work_phone,
CMH.LivingArrangement,
Demo.CD_EFFDT as Last_demo_update,
Case when Guard.Case_No IS not null then 'Y' else null end as Guardian_in_sheet2
from encompass.dbo.ENCClient as ENC
join encompass.dbo.PCCClient as C on C.CL_RCDID = ENC.CL_RCDID
left join encompass.dbo.PCCClientDemographics Demo on Demo.CD_RCDID = C.CLF_CDEID
left join encompass.dbo.PCFCode ResidentialArrangement on
ResidentialArrangement.CO_RCDID = Demo.CDF_QIRESI
join encompass.dbo.tblE2_CMH_Open_Consumers CMH on CMH.Case_No = cast( C.CL_CASENO as Int)
left join encompass.dbo.PCFAddress address on address.AD_RCDID = C.CLF_ADRID
left join encompass.dbo.PCFAddress MailingAddress on Mailingaddress.AD_RCDID = ENC.CLF_ADRID
left join tblE2_Bio_Psych_Social Bio on Bio.county like
'Washtenaw' and Bio.case_no = CMH.case_no
left join encompass.dbo.E2_Fn_CMH_Consumer_Guardian ('Washtenaw', '%2$s', '%2$s')
as Guard on Guard.Case_No = CMH.Case_No
where CMH.county like 'Washtenaw'", input$start_date, input$end_date)
# last service
sql$query$guardian <-
sprintf("select distinct
CMH.county, CMH.case_no,
ltrim(rtrim(C.CL_FNAME)) as Client_First,
ltrim(rtrim(C.CL_LNAME)) as client_last,
cast(GuardianshipType.CO_NAME as varchar(32)) as GuardianshipType,
ltrim(rtrim(Contact.CC_FNAME)) as Guardian_First,
ltrim(rtrim(Contact.CC_LNAME)) as Guardian_Last,
ltrim(rtrim(Guardian_address.AD_ADDR1)) as MailAddress_line1,
ltrim(rtrim(Guardian_address.AD_ADDR2)) as MailAddress_line2,
Guardian_address.AD_CITY as MailCity,
Guardian_address.AD_STATE as Mailstate,
Guardian_address.AD_ZIP as MailzipCode
from encompass.dbo.ENCClient ENC
join encompass.dbo.PCCClient C on C.CL_RCDID = ENC.CL_RCDID
join encompass.dbo.tblE2_CMH_Open_Consumers CMH on
CMH.Case_No = cast( C.CL_CASENO as Int) and CMH.county like 'Washtenaw'
join encompass.dbo.ENCClientContact Contact on Contact.CCF_CLTID = ENC.CL_RCDID
and CC_OKTOUSE = 'Y' and CC_EXPDT is null
join encompass.dbo.PCFCode GuardianshipType on
Contact.CCF_CCOGTP = GuardianshipType.CO_RCDID
join encompass.dbo.PCFAddress as Guardian_address on
Contact.CCF_ADRID = Guardian_address.AD_RCDID and
Guardian_address.AD_EXPDT is null
where cast(GuardianshipType.CO_NAME as varchar(32)) in
('Public Guardian', 'Family Guardian', 'Temporary Wardship',
'Court-Appointed Guardian', 'Permanent State Wardship')",
input$start_date, input$end_date)
# query and collect all results -----------------------------------------------
sql$output <- sapply(
names(sql$query),
FUN = function(x) {
output <-
sqlQuery(query = get(x, with(sql, query)),
channel = sql$channel, stringsAsFactors = FALSE)
output <- data.table(output)
# assign(x, output, envir = sql)
return(output)
},
USE.NAMES = TRUE
) |
# Script (4 dist_all.R): for each decade 1000-2000, compute every city's
# distance to the nearest polity border from the Centennia map data.
pacman::p_load(rio,
tidyverse,
magrittr,
sf,
tmap,
raster,
fasterize,
tictoc,
future,
furrr,
haven,
stargazer,
fixest,
zoo,
knitr,
magick,
stringi,
broom,
hrbrthemes,
readr,
nngeo)
# project paths, relative to the working directory (echoed as a sanity check)
path_paper <- paste0(getwd(), "/papers_jeppe/8 trade/")
path_paper
path_data <- paste0(getwd(), "/papers_jeppe/8 trade/data/")
path_data
path_graph <- paste0(getwd(), "/papers_jeppe/8 trade/tex/")
path_graph
# ---------------------------------------------------------
# Load and prepare data
# ---------------------------------------------------------
set_crs <- 3035 # define CRS (EPSG:3035 = ETRS89 / LAEA Europe, metric units)
# load cities
cities <- rio::import(paste0(path_paper,
"data/df_full/",
"cities_raw.Rdata"))
# transform to spatial feature
# (city points enter as WGS84 lon/lat, then are projected to set_crs)
cit_sf <- cities %>%
dplyr::select(city_id, year, latitude, longitude) %>%
st_as_sf(.,
coords = c("longitude", "latitude"),
crs = 4326) %>%
st_transform(.,
crs = set_crs)
# ----- Centennia
cent <- st_read(dsn = "5_centennia/centennia_full",
layer = "centennia_full",
crs = 4326) %>%
st_transform(.,
crs = set_crs)
# transform centennia polygons to linestring ('borders'/'lines')
# so distances below are measured to the border, not to the polygon interior
cent <- cent %>%
st_cast("MULTILINESTRING")
# ---------------------------------------------------------
# Split data
# ---------------------------------------------------------
# define a sequence to do this within
year_sec <- seq(1000, 2000, 10)
# filter the city data to fit the time period
cit_sf <- cit_sf %>%
filter(year %in% year_sec)
# filter the centennia data to fit the time period
cent <- cent %>%
filter(year %in% year_sec)
# split the city data into a list
# NOTE(review): cit_list and cent_list are paired positionally by future_map2
# below -- this assumes both layers contain exactly the same set of years.
cit_list <- split(cit_sf,
f = cit_sf$year)
# split the centennia data into a list
cent_list <- split(cent,
f = cent$year)
rm(cit_sf, cities, cent)
fun_dist <- function(cit_sf, cent, year_value, border_dist, df) {
# Distance from every city to the nearest border line, for one year slice.
#
# cit_sf: sf POINT layer of cities for a single year (needs city_id, year).
# cent:   sf line layer of borders for the same year.
# year_value, border_dist, df: placeholders only -- each is assigned below
#   before first use; callers (future_map2) supply just the first two args.
# Returns a data.frame: nearest-border distance, year, city_id.
# save year value
year_value <- cit_sf$year[1]
# row_id keys the join between the distance vector and the city ids
cit_sf <- cit_sf %>%
dplyr::mutate(row_id = row_number())
# calculate distance to nearest border (k = 1 nearest feature, nngeo::st_nn)
border_dist <- st_nn(cit_sf, cent, k = 1, returnDist = T, progress = FALSE)
border_dist <- border_dist$dist %>% unlist()
# drop the geometry; keep only the join key and the city id
cit_sf <- cit_sf %>%
as.data.frame() %>%
dplyr::select(c(city_id, row_id))
# save distance data
df <- border_dist %>%
as.data.frame() %>%
dplyr::mutate(row_id = row_number(),
year = year_value) %>%
left_join(.,
cit_sf,
by = "row_id") %>%
dplyr::select(-row_id)
return(df)
}
# ---------------------------------------------------------
# Apply function
# ---------------------------------------------------------
# ----- Mac
# allow up to 1000 MB of exported globals per future worker
required_MB <- 1000
options(future.globals.maxSize = required_MB*1024^2)
no_cores <- availableCores() - 2 # keep two cores free for the OS / session
plan(multisession, workers = no_cores)
tic()
# one (cities, borders) pair per decade, processed in parallel by fun_dist
dist_list <- furrr::future_map2(cit_list,
cent_list,
fun_dist,
.progress = T,
.options = furrr_options(seed = TRUE,
scheduling = 1))
toc()
plan(sequential)
# ---------------------------------------------------------
# Unpack data
# ---------------------------------------------------------
# stack the per-decade results; year_id (the list name) is redundant with year
dist_data <- bind_rows(dist_list, .id = "year_id") %>%
dplyr::select(-year_id)
# rename variables
colnames(dist_data) <- c("border_distAll", "year", "city_id")
# ---------------------------------------------------------
# Export data
# ---------------------------------------------------------
save(dist_data,
file = paste0(path_data,
"border_data/border_distAll.Rdata"))
| /scripts/4 dist_all.R | no_license | jvieroe/ALineInTheSand | R | false | false | 4,436 | r | pacman::p_load(rio,
tidyverse,
magrittr,
sf,
tmap,
raster,
fasterize,
tictoc,
future,
furrr,
haven,
stargazer,
fixest,
zoo,
knitr,
magick,
stringi,
broom,
hrbrthemes,
readr,
nngeo)
path_paper <- paste0(getwd(), "/papers_jeppe/8 trade/")
path_paper
path_data <- paste0(getwd(), "/papers_jeppe/8 trade/data/")
path_data
path_graph <- paste0(getwd(), "/papers_jeppe/8 trade/tex/")
path_graph
# ---------------------------------------------------------
# Load and prepare data
# ---------------------------------------------------------
set_crs <- 3035 # define CRS
# load cities
cities <- rio::import(paste0(path_paper,
"data/df_full/",
"cities_raw.Rdata"))
# transform to spatial feature
cit_sf <- cities %>%
dplyr::select(city_id, year, latitude, longitude) %>%
st_as_sf(.,
coords = c("longitude", "latitude"),
crs = 4326) %>%
st_transform(.,
crs = set_crs)
# ----- Centennia
cent <- st_read(dsn = "5_centennia/centennia_full",
layer = "centennia_full",
crs = 4326) %>%
st_transform(.,
crs = set_crs)
# transform centennia polygons to linestring ('borders'/'lines')
cent <- cent %>%
st_cast("MULTILINESTRING")
# ---------------------------------------------------------
# Split data
# ---------------------------------------------------------
# define a sequence to do this within
year_sec <- seq(1000, 2000, 10)
# filter the city data to fit the time period
cit_sf <- cit_sf %>%
filter(year %in% year_sec)
# filter the centennia data to fit the time period
cent <- cent %>%
filter(year %in% year_sec)
# split the city data into a list
cit_list <- split(cit_sf,
f = cit_sf$year)
# split the centennia data into a list
cent_list <- split(cent,
f = cent$year)
rm(cit_sf, cities, cent)
# ---------------------------------------------------------
# Define function to measure border distance
# ---------------------------------------------------------
fun_dist <- function(cit_sf, cent, year_value, border_dist, df) {
# For one year's city layer, look up the nearest border line and report its
# distance per city.
#
# cit_sf: sf POINT layer (one year) carrying city_id and year columns.
# cent:   sf line layer of borders for the same year.
# year_value, border_dist, df: not real inputs -- all three are overwritten
#   before use; future_map2 callers pass only cit_sf and cent.
# Returns a data.frame of (distance, year, city_id).
# save year value
year_value <- cit_sf$year[1]
# row_id links each computed distance back to its city
cit_sf <- cit_sf %>%
dplyr::mutate(row_id = row_number())
# calculate distance to nearest border (nngeo::st_nn with k = 1)
border_dist <- st_nn(cit_sf, cent, k = 1, returnDist = T, progress = FALSE)
border_dist <- border_dist$dist %>% unlist()
# reduce the sf object to a plain lookup table (city_id by row_id)
cit_sf <- cit_sf %>%
as.data.frame() %>%
dplyr::select(c(city_id, row_id))
# save distance data
df <- border_dist %>%
as.data.frame() %>%
dplyr::mutate(row_id = row_number(),
year = year_value) %>%
left_join(.,
cit_sf,
by = "row_id") %>%
dplyr::select(-row_id)
return(df)
}
# ---------------------------------------------------------
# Apply function
# ---------------------------------------------------------
# ----- Mac
required_MB <- 1000
options(future.globals.maxSize = required_MB*1024^2)
no_cores <- availableCores() - 2
plan(multisession, workers = no_cores)
tic()
dist_list <- furrr::future_map2(cit_list,
cent_list,
fun_dist,
.progress = T,
.options = furrr_options(seed = TRUE,
scheduling = 1))
toc()
plan(sequential)
# ---------------------------------------------------------
# Unpack data
# ---------------------------------------------------------
dist_data <- bind_rows(dist_list, .id = "year_id") %>%
dplyr::select(-year_id)
# rename variables
colnames(dist_data) <- c("border_distAll", "year", "city_id")
# ---------------------------------------------------------
# Export data
# ---------------------------------------------------------
save(dist_data,
file = paste0(path_data,
"border_data/border_distAll.Rdata"))
|
// ARM922T CP15 Register Definition File
//
INCLUDE "armbase32.rd"
REG=cp15_cpuid 0x00000000 COPROC15 4 // c15_0_o0_0 (c15_0) : CPU ID
REG=cp15_cache 0x00000040 COPROC15 4 // c15_0_o1_0 : Cache Type Register
REG=cp15_cntrl 0x00000004 COPROC15 4 // c15_1_o0_0 (c15_1) : Control Register
REG=cp15_ttb 0x00000008 COPROC15 4 // c15_2_o0_0 (c15_2) : Translation Table Base
REG=cp15_domac 0x0000000C COPROC15 4 // c15_3_o0_0 (c15_3) : Domain Access Control
REG=cp15_dfsr 0x00000014 COPROC15 4 // c15_5_o0_0 (c15_5) : Fault Status Register, Data
REG=cp15_ifsr 0x00000054 COPROC15 4 // c15_5_o1_0 : Fault Status Register, Inst Prefetch
REG=cp15_far 0x00000018 COPROC15 4 // c15_6_o0_0 (c15_6) : Fault Address Register, Data
REG=cp15_dclock 0x00000024 COPROC15 4 // c15_9_o0_0 (c15_9) : DCache Lockdown
REG=cp15_iclock 0x00000064 COPROC15 4 // c15_9_o1_0 : ICache Lockdown
REG=cp15_dtlblk 0x00000028 COPROC15 4 // c15_10_o0_0 (c15_10): Data TLB Lockdown
REG=cp15_itlblk 0x00000068 COPROC15 4 // c15_10_o1_0 : Instruction TLB Lockdown
REG=cp15_fcsepid 0x00000034 COPROC15 4 // c15_13_o0_0 (c15_13): Fast Context Switch (FSE) Process ID (PID)
//
REG_FIELD=cp15_cpuid imp 31 24, spec 23 20, arch 19 16, part 15 4, layout 3 0
REG_FIELD=cp15_cache ctype 28 25, s 24 24, dsize 20 18, dass 17 15, dm 14 14, dlen 13 12, isize 8 6, iass 5 3, im 2 2, ilen 1 0
REG_FIELD=cp15_cntrl ia 31 31, nf 30 30, rr 14 14, v 13 13, i 12 12, r 9 9, s 8 8, b 7 7, c 2 2, a 1 1, m 0 0
REG_FIELD=cp15_domac d15 31 30, d14 29 28, d13 27 26, d12 25 24, d11 23 22, d10 21 20, d09 19 18, d08 17 16, d07 15 14, d06 13 12, d05 11 10, d04 9 8, d03 7 6, d02 5 4, d01 3 2, d00 1 0
REG_FIELD=cp15_dfsr domain 7 4, fault 3 0
REG_FIELD=cp15_ifsr domain 7 4, fault 3 0
REG_FIELD=cp15_dclock index 31 26
REG_FIELD=cp15_iclock index 31 26
REG_FIELD=cp15_dtlblk base 31 26, victim 25 20, p 0 0
REG_FIELD=cp15_itlblk base 31 26, victim 25 20, p 0 0
REG_FIELD=cp15_fcsepid fcsepid 31 25
//
// Add CP15 to Register Window
REG_WINDOW = CP15 cp15_cpuid, cp15_cache, cp15_cntrl, cp15_ttb, cp15_domac, cp15_dfsr, cp15_ifsr, cp15_far, cp15_dclock, cp15_iclock, cp15_dtlblk, cp15_itlblk, cp15_fcsepid
//
// <eof>
| /i686-pc-linux-gnu/arm-xilinx-linux-gnueabi/mep/bin/arm/arm922t.rd | permissive | Kayuii/arm_xilinx_linux_gnueabi | R | false | false | 2,255 | rd | // ARM922T CP15 Register Definition File
//
INCLUDE "armbase32.rd"
REG=cp15_cpuid 0x00000000 COPROC15 4 // c15_0_o0_0 (c15_0) : CPU ID
REG=cp15_cache 0x00000040 COPROC15 4 // c15_0_o1_0 : Cache Type Register
REG=cp15_cntrl 0x00000004 COPROC15 4 // c15_1_o0_0 (c15_1) : Control Register
REG=cp15_ttb 0x00000008 COPROC15 4 // c15_2_o0_0 (c15_2) : Translation Table Base
REG=cp15_domac 0x0000000C COPROC15 4 // c15_3_o0_0 (c15_3) : Domain Access Control
REG=cp15_dfsr 0x00000014 COPROC15 4 // c15_5_o0_0 (c15_5) : Fault Status Register, Data
REG=cp15_ifsr 0x00000054 COPROC15 4 // c15_5_o1_0 : Fault Status Register, Inst Prefetch
REG=cp15_far 0x00000018 COPROC15 4 // c15_6_o0_0 (c15_6) : Fault Address Register, Data
REG=cp15_dclock 0x00000024 COPROC15 4 // c15_9_o0_0 (c15_9) : DCache Lockdown
REG=cp15_iclock 0x00000064 COPROC15 4 // c15_9_o1_0 : ICache Lockdown
REG=cp15_dtlblk 0x00000028 COPROC15 4 // c15_10_o0_0 (c15_10): Data TLB Lockdown
REG=cp15_itlblk 0x00000068 COPROC15 4 // c15_10_o1_0 : Instruction TLB Lockdown
REG=cp15_fcsepid 0x00000034 COPROC15 4 // c15_13_o0_0 (c15_13): Fast Context Switch (FSE) Process ID (PID)
//
REG_FIELD=cp15_cpuid imp 31 24, spec 23 20, arch 19 16, part 15 4, layout 3 0
REG_FIELD=cp15_cache ctype 28 25, s 24 24, dsize 20 18, dass 17 15, dm 14 14, dlen 13 12, isize 8 6, iass 5 3, im 2 2, ilen 1 0
REG_FIELD=cp15_cntrl ia 31 31, nf 30 30, rr 14 14, v 13 13, i 12 12, r 9 9, s 8 8, b 7 7, c 2 2, a 1 1, m 0 0
REG_FIELD=cp15_domac d15 31 30, d14 29 28, d13 27 26, d12 25 24, d11 23 22, d10 21 20, d09 19 18, d08 17 16, d07 15 14, d06 13 12, d05 11 10, d04 9 8, d03 7 6, d02 5 4, d01 3 2, d00 1 0
REG_FIELD=cp15_dfsr domain 7 4, fault 3 0
REG_FIELD=cp15_ifsr domain 7 4, fault 3 0
REG_FIELD=cp15_dclock index 31 26
REG_FIELD=cp15_iclock index 31 26
REG_FIELD=cp15_dtlblk base 31 26, victim 25 20, p 0 0
REG_FIELD=cp15_itlblk base 31 26, victim 25 20, p 0 0
REG_FIELD=cp15_fcsepid fcsepid 31 25
//
// Add CP15 to Register Window
REG_WINDOW = CP15 cp15_cpuid, cp15_cache, cp15_cntrl, cp15_ttb, cp15_domac, cp15_dfsr, cp15_ifsr, cp15_far, cp15_dclock, cp15_iclock, cp15_dtlblk, cp15_itlblk, cp15_fcsepid
//
// <eof>
|
#some functions
getNonHAEIdx_iter <- function(iter, trainIdxIterIDNonHAE) {
  # Pull the non-HAE row indices assigned to sampling iteration `iter`.
  # Each element of trainIdxIterIDNonHAE is a two-column table:
  # column 1 = row index, column 2 = iteration id.
  per_table <- lapply(trainIdxIterIDNonHAE, function(tbl) {
    in_iter <- tbl[, 2] == iter
    tbl[in_iter, 1]
  })
  # concatenate across tables (9860 indices in the original run)
  do.call(c, per_table)
}
createCurve <- function(resp, pred, recall_tar){
# Precision at given recall levels, read off a bucketed precision-recall curve.
#
# resp:       binary response vector (1 = positive class).
# pred:       prediction scores, same length as resp.
# recall_tar: recall levels (e.g. c(0.05, 0.1, 0.25, 0.5)) at which to
#             report precision.
# Returns one precision value per element of recall_tar.
predobj <- prediction(pred, resp)
perf <- performance(predobj, 'prec', 'rec')
recall <- perf@x.values[[1]]
precision <- perf@y.values[[1]]
rePrec <- cbind(recall, precision)
# average the curve within recall buckets of width 0.005 to smooth ties/noise
bucket <- cut(recall, breaks=seq(0, 1, 0.005), include.lowest=T,right=F)
rec_prec_byBucket <- aggregate(rePrec, by=list(bucket), function(i)mean(i, na.rm=T))
# for each target recall, take the precision of the bucket whose mean recall
# is closest.  [1] breaks ties in which(); without it a tie returned several
# precisions for one target and misaligned the result with recall_tar
# (same guard already used by createCurve_v2).
temp4 <- unlist(lapply(recall_tar, function(X){
idx <- which(abs(rec_prec_byBucket[, 2]-X)==min(abs(rec_prec_byBucket[, 2]-X), na.rm=T))[1]
prec_sel <- rec_prec_byBucket[idx, 3]
return(prec_sel)
}))
return(temp4)
}
createCurve_v2 <- function(resp, pred, recall_tar){
# Like createCurve, but reads precision off the *raw* (unbucketed) PR curve:
# for each target recall it takes the first curve point with closest recall.
# resp: binary response (1 = positive); pred: scores; recall_tar: recall
# levels.  Returns one precision value per element of recall_tar.
predobj <- prediction(pred, resp)
#add plot
perf <- performance(predobj, 'prec', 'rec') # added by jie for recall-precision plot.
recall <- perf@x.values[[1]]
precision <- perf@y.values[[1]]
# NOTE(review): auc is computed but never used or returned here
auc <- performance(predobj, 'auc')@y.values[[1]]
rePrec <- cbind(recall, precision)
# bucketed curve is built but (unlike createCurve) not used in the lookup below
bucket <- cut(recall, breaks=seq(0, 1, 0.005), include.lowest=T,right=F)
rec_prec_byBucket <- aggregate(rePrec, by=list(bucket), function(i)mean(i, na.rm=T))
##in simulation
#recall<- c(0.05, 0.1, 0.25, 0.5)
temp4 <- unlist(lapply(recall_tar, function(X){
# [1] breaks ties so exactly one precision is returned per target recall
idx <- which(abs(rePrec[, 1]-X)==min(abs(rePrec[, 1]-X), na.rm=T))[1]
prec_sel <- rePrec[idx, 2]
return(prec_sel)
}))
##end
return(temp4)
}
msOnTest <- function(pred, response, recall_tar){
# Evaluate an ensemble of models on a fixed test set.
#
# pred:       matrix of scores, one row per model/iteration, one column per
#             test case (indexed below as pred[i, ]).
# response:   binary test response (1 = positive), length = ncol(pred).
# recall_tar: recall levels at which to report PPV (precision).
# Side effect: appends one bucketed recall/precision sheet per row of `pred`
# to 'Curve_sepIter.xlsx'.
# Returns c(auc, PPV(recall=...)) averaged over the rows of `pred`.
temp3 <- lapply(seq_len(nrow(pred)), function(i){
predobj <- prediction(pred[i,], response)
perf <- performance(predobj, 'prec', 'rec')
recall <- perf@x.values[[1]]
precision <- perf@y.values[[1]]
auc <- performance(predobj, 'auc')@y.values[[1]]
bucket <- cut(recall, breaks=seq(0, 1, 0.005), include.lowest=T,right=F)
rec_prec <- cbind(recall, precision)
rec_prec_byBucket <- aggregate(rec_prec, by=list(bucket), function(i)mean(i, na.rm=T))
# was: write.xlsx(rec_prec_result, ...) -- rec_prec_result was never defined;
# the bucketed curve is what this sheet is meant to hold
write.xlsx(rec_prec_byBucket, paste('Curve_sepIter.xlsx', sep=''),
sheetName=paste("Iter_",i), row.names=F, append=T)
# precision of the bucket whose mean recall is closest to each target;
# [1] breaks ties so exactly one value is returned per target recall
temp4 <- unlist(lapply(recall_tar, function(X){
idx <- which(abs(rec_prec_byBucket[, 2]-X)==min(abs(rec_prec_byBucket[, 2]-X), na.rm=T))[1]
prec_sel <- rec_prec_byBucket[idx, 3]
return(prec_sel)
}))
return(list(auc=auc, rec_prec_byBucket=rec_prec_byBucket, ppv=temp4))
})
# was: lapply(1:10, ...) -- hard-coded 10 iterations; use the actual count
auc_mean <- mean(unlist(lapply(temp3, function(X)X[[1]])), na.rm=T)
ppv_list <- lapply(temp3, function(X){
return(X[[3]])
})
# was: ldply(ppv_mean, quickdf) -- used ppv_mean before it existed;
# the per-iteration PPV list is what gets stacked and averaged
ppv_df <- ldply(ppv_list, quickdf)
ppv_mean <- apply(ppv_df, 2, mean, na.rm=T)
ms <- c(auc_mean, ppv_mean)
names(ms) <- c('auc', paste("PPV(recall=", recall_tar,')', sep=''))
return(ms)
}
msOnTest_dong <- function(pred, response, recall_tar){
# Average the ensemble scores into a single score per case, then report
# AUC, AUPR and precision at the target recall levels on the test set.
# Side effect: writes 'recall-precision curve on test.pdf'.
# Returns list(ms = c(auc, aupr, PPV...), curve = bucketed PR curve).
#
# NOTE(review): apply(pred, 1, mean) takes *row* means -- this assumes pred
# is cases x iterations, whereas msOnTest indexes pred[i, ] as per-iteration
# rows.  Confirm the orientation expected by callers.
pred <- apply(pred, 1, mean, na.rm=T)
predobj <- prediction(pred, response)
#add plot
perf <- performance(predobj, 'prec', 'rec') # added by jie for recall-precision plot.
recall <- perf@x.values[[1]]
precision <- perf@y.values[[1]]
auc <- performance(predobj, 'auc')@y.values[[1]]
rec_prec <- data.frame(recall=recall, precision=precision)
# drop NA rows (e.g. undefined precision at recall 0) before integrating
rec_prec_omitMiss <- rec_prec[complete.cases(rec_prec),]
aupr <- trapz(rec_prec_omitMiss$recall, rec_prec_omitMiss$precision)
bucket <- cut(recall, breaks=seq(0, 1, 0.005), include.lowest=T,right=F)
rec_prec_byBucket <- aggregate(rec_prec, by=list(bucket), function(i)mean(i, na.rm=T))
#write.csv(rec_prec_byBucket, paste('Curve_dong.csv', sep=''),
# row.names=F, quote=T)
#plot
pdf(file=paste('recall-precision curve on test.pdf', sep=''))
#plot(recall, precision, type='l', main=paste('recall-precision curve'))
plot(perf)
dev.off()
##in simulation
temp4 <- unlist(lapply(recall_tar, function(X){
# NOTE(review): unlike createCurve_v2 there is no [1] tie-break here; a tie
# in which() would yield extra precision values for one target recall.
idx <- which(abs(rec_prec_byBucket[, 2]-X)==min(abs(rec_prec_byBucket[, 2]-X), na.rm=T))
prec_sel <- rec_prec_byBucket[idx, 3]
return(prec_sel)
}))
##end
ms <- c(auc, aupr, temp4)
names(ms) <- c('auc',"aupr", paste("PPV(recall=", recall_tar,')', sep=''))
return(list(ms=ms, curve=rec_prec_byBucket))
}
msOnTest_sep_v2 <- function(pred, response, recall_tar){
# Test-set measures for a single score vector: AUC, AUPR and precision at
# the target recalls, read off the raw (unbucketed) curve.
# Side effect: writes 'recall-precision curve on test.pdf'.
# Returns list(ms, curve = bucketed curve, rec_prec = raw curve).
#pred <- apply(pred, 1, mean, na.rm=T)
predobj <- prediction(pred, response)
#add plot
# 'ppv' vs 'sens' is precision vs recall under ROCR's naming
perf <- performance(predobj, 'ppv', 'sens') # added by jie for recall-precision plot.
recall <- perf@x.values[[1]]
precision <- perf@y.values[[1]]
auc <- performance(predobj, 'auc')@y.values[[1]]
rec_prec <- data.frame(recall=recall, precision=precision)
# drop NA rows before integrating the curve
rec_prec_omitMiss <- rec_prec[complete.cases(rec_prec),]
aupr <- trapz(rec_prec_omitMiss$recall, rec_prec_omitMiss$precision)
# coarser buckets (0.01) than the other msOnTest_* variants (0.005)
bucket <- cut(recall, breaks=seq(0, 1, 0.01), include.lowest=T,right=F)
rec_prec_byBucket <- aggregate(rec_prec, by=list(bucket), function(i)mean(i, na.rm=T))
#write.csv(rec_prec_byBucket, paste('Curve_dong.csv', sep=''),
# row.names=F, quote=T)
#plot
pdf(file=paste('recall-precision curve on test.pdf', sep=''))
#plot(recall, precision, type='l', main=paste('recall-precision curve'))
plot(perf)
dev.off()
##in simulation
temp4 <- unlist(lapply(recall_tar, function(X){
#idx <- sample(rep(which(abs(rec_prec[, 1]-X)==min(abs(rec_prec[, 1]-X), na.rm=T)), 2), 1)
# first raw-curve point with closest recall; [1] breaks ties
idx=which(abs(rec_prec[, 1]-X)==min(abs(rec_prec[, 1]-X), na.rm=T))[1]
prec_sel <- rec_prec[idx, 2]
return(prec_sel)
}))
##end
ms <- c(auc, aupr, temp4)
names(ms) <- c('auc',"aupr", paste("PPV(recall=", recall_tar,')', sep=''))
return(list(ms=ms, curve=rec_prec_byBucket, rec_prec=rec_prec))
}
msOnTest_sep <- function(pred, response, recall_tar){
# Test-set measures for a single score vector: AUC, AUPR and precision at
# the target recalls, read off the *bucketed* curve (cf. msOnTest_sep_v2,
# which uses the raw curve).  Side effect: writes the PR-curve pdf.
# Returns list(ms = c(auc, aupr, PPV...), curve = bucketed PR curve).
#pred <- apply(pred, 1, mean, na.rm=T)
predobj <- prediction(pred, response)
#add plot
perf <- performance(predobj, 'prec', 'rec') # added by jie for recall-precision plot.
recall <- perf@x.values[[1]]
precision <- perf@y.values[[1]]
auc <- performance(predobj, 'auc')@y.values[[1]]
rec_prec <- data.frame(recall=recall, precision=precision)
# drop NA rows before integrating the curve
rec_prec_omitMiss <- rec_prec[complete.cases(rec_prec),]
aupr <- trapz(rec_prec_omitMiss$recall, rec_prec_omitMiss$precision)
bucket <- cut(recall, breaks=seq(0, 1, 0.005), include.lowest=T,right=F)
rec_prec_byBucket <- aggregate(rec_prec, by=list(bucket), function(i)mean(i, na.rm=T))
#write.csv(rec_prec_byBucket, paste('Curve_dong.csv', sep=''),
# row.names=F, quote=T)
#plot
pdf(file=paste('recall-precision curve on test.pdf', sep=''))
#plot(recall, precision, type='l', main=paste('recall-precision curve'))
plot(perf)
dev.off()
##in simulation
temp4 <- unlist(lapply(recall_tar, function(X){
# NOTE(review): no [1] tie-break here (unlike msOnTest_sep_v2); a tie in
# which() would return several precisions for one target recall.
idx <- which(abs(rec_prec_byBucket[, 2]-X)==min(abs(rec_prec_byBucket[, 2]-X), na.rm=T))
prec_sel <- rec_prec_byBucket[idx, 3]
return(prec_sel)
}))
##end
ms <- c(auc, aupr, temp4)
names(ms) <- c('auc',"aupr", paste("PPV(recall=", recall_tar,')', sep=''))
return(list(ms=ms, curve=rec_prec_byBucket))
}
getBkt <- function(pred, resp, n.bkt){
# Bucket the precision-recall curve into n.bkt equal-width recall bins.
#
# pred:  prediction scores; resp: binary response (1 = positive).
# n.bkt: number of recall buckets.
# Returns list(bkt = bucketed recall/precision data.frame,
#              ms  = c(auc, aupr)).
predobj <- prediction(pred, resp)
perf <- performance(predobj, 'prec', 'rec')
recall <- perf@x.values[[1]]
precision <- perf@y.values[[1]]
rec_prec <- cbind(recall, precision)
auc <- performance(predobj, 'auc')@y.values[[1]]
# drop NA rows (e.g. undefined precision at recall 0) before integrating
rec_prec_omitMiss <- rec_prec[complete.cases(rec_prec),]
aupr <- trapz(rec_prec_omitMiss[, 1], rec_prec_omitMiss[, 2])
bucket <- cut(recall, breaks=seq(0, 1, 1/n.bkt), include.lowest=T,right=F)
rec_prec_byBucket <- aggregate(rec_prec, by=list(bucket), function(i)mean(i, na.rm=T))
# was: list(bkt <- ..., ms <- ...) -- `<-` inside list() produces *unnamed*
# elements, so callers could not access $bkt / $ms; `=` names them properly
return(list(bkt = rec_prec_byBucket, ms = c(auc=auc, aupr=aupr)))
}
get_out_of_sample_curve <- function(wt, lmd, foldid, response,data_mtx){
# For each CV fold, refit a weighted lasso-logistic model (glmnet) at the
# chosen lambda on the remaining folds and score the held-out fold.
#
# wt:  weight given to the negative class; positives (response == 1) get 1.
# lmd: lambda value to refit at.
# foldid, response, data_mtx: NOTE(review) -- accepted but never used; the
#   folds actually come from the globals k.folds, haeIdx_iter, nonhaeIdx_iter,
#   foldid_hae_cv and trainIdxIterIDNonHAE via create_tr_ts_forCVi.
# Returns a list (one element per fold) of data.frames holding the held-out
# response and predicted probability, for building out-of-sample PR curves.
curve_fromCV <- lapply(1:k.folds, function(i){
dataset_cvi <- create_tr_ts_forCVi(haeIdx_iter, nonhaeIdx_iter, foldid_hae_cv, trainIdxIterIDNonHAE, i)
# element 2 = training folds, element 1 = held-out fold
train_cvi <- dataset_cvi[[2]]
test_cvi <- dataset_cvi[[1]]
cv_training_resp <- train_cvi$response
cv_training_matrix<- model.matrix(response~., data=train_cvi)[, -1]
cv_test_resp<- test_cvi$response # select 1 fold as test data
cv_test_matrix<- model.matrix(response~., data=test_cvi)[, -1]
# cases keep weight 1; all non-cases are down/up-weighted by `wt`
wt_vct <- ifelse(cv_training_resp==1, 1, wt)
fit_lasso<- glmnet(cv_training_matrix, cv_training_resp, weights=wt_vct,
lambda=lmd, family="binomial", alpha=1, standardize=F)
test_pred<- predict(fit_lasso, cv_test_matrix, type="response") #2713 599
curve <- data.frame(resp=cv_test_resp, pred=test_pred)
return(curve)
})
return(curve_fromCV)
}
grid_search_v2 <- function(r, training_data_lasso){
# Evaluate one (class weight, fold) grid point for the lasso grid search.
#
# r: row index into the global `grid` (col 1 = class weight j, col 2 = fold i).
# training_data_lasso: training data with a binary `response` column.
# Globals used: grid, foldid, lambda_seq, crit (target recall), createCurve.
# Returns c(wt, fold, ms = precision at `crit` for each lambda in lambda_seq).
i <- grid[r, 2]
j <- grid[r, 1]
cv_training_data_lasso<- training_data_lasso[foldid!=i,] # select 9 folds as training data
cv_training_matrix<- model.matrix(response~., data=cv_training_data_lasso)[,-1]
cv_test_data_lasso<- training_data_lasso[foldid==i,] # select 1 fold as test data
cv_test_matrix<- model.matrix(response~., data=cv_test_data_lasso)[,-1]
fit_lasso<- glmnet(cv_training_matrix, cv_training_data_lasso$response,
lambda=lambda_seq, family="binomial", alpha=1,
weights = ifelse(cv_training_data_lasso$response==1, 1, j),
standardize=F)
test_pred<- predict(fit_lasso, cv_test_matrix, type="response")
# NOTE(review): the NA padding below assumes createCurve returns exactly one
# value per lambda column, i.e. length(crit) == 1 -- confirm.
test_pred_avg<- apply(test_pred, 2, function(x){createCurve(cv_test_data_lasso$response , x, crit)})
test_pred_avg<- c(test_pred_avg , rep(NA , length(lambda_seq) - length(test_pred_avg))) # some small lambda may not be reached
cv_auc<- c(wt=j, fold=i, ms=test_pred_avg) # measurement vector for this grid point (precision at crit, despite the name)
return(cv_auc)
}#end for grid
grid_search_1fold_svm_findPred <- function(r, data, data_ts){
# Re-score one grid point from a previously saved SVM fit (no retraining):
# loads the model saved by grid_search_1fold_svm, predicts on data_ts, saves
# the decision values to a '_v2' RData file and reports precision at `crit`.
#
# r: row index into the global `grid` (col 1 = cost, col 2 = class weight).
# data: unused here (kept for signature parity with grid_search_1fold_svm).
# data_ts: held-out fold with a factor `response` ('H'/'NH').
# Globals used: grid, iter, traceFile, crit.
cost <- grid[r, 1]
wt <- grid[r, 2]
# NOTE(review): wts is built but unused in this function
wts <- list( c("NH"=1,"H"=wt) )
resp_ts <- ifelse(data_ts$response=='H', 1, 0)
x_ts <- data_ts[, -match('response', names(data_ts))]
cat(file = traceFile, append=T, 'iter-', iter , ' wt-', wt, ' cost-', cost, ' start!\n')
start <- proc.time()
# load() restores the saved `pred_model` list into this scope
load(paste('pred_wt(', wt,')cost(', cost, ').RData', sep=''))
svmFit <- pred_model$model_onSubTr
cat(file=traceFile, append=T, 'iter-', iter , ' wt-', wt, ' cost-', cost, ' svm training on subtrain loading end! '
, 'time used-', (proc.time()-start)[3]/60, 'min! '
, 'ending time-', date() ,'\n'
)
# NOTE(review): 'sclae' is a typo for 'scale'; predict.svm has no such
# argument either way, so it is silently absorbed by `...` -- newdata
# scaling is governed by the fitted model's own scale settings.
pred <- predict(svmFit, x_ts , sclae=T,decision.values = TRUE)
cat(file=traceFile, append=T,'iter-', iter , ' wt-', wt, ' cost-', cost, ' pred on left out fold end! '
, 'time used-', (proc.time()-start)[3]/60, 'min! '
, 'ending time-', date() ,'\n'
)
predscore <- attr(pred, "decision.values")
pred_model <- list(model_onSubTr=svmFit, pred_ts = predscore)
save(file=paste('pred_wt(', wt,')cost(', cost, ')_v2.RData', sep=''), pred_model)
ms <- createCurve(resp = resp_ts, pred=predscore, recall_tar = crit)
grid_ms = c(cost=cost, weight=wt, ms=ms)
cat(file=traceFile, append=T,'iter-', iter , ' wt-', wt, ' cost-', cost, ' ms-', ms, '\n')
# note: the saved file stores decision values; the returned pred_ts is the
# full prediction object
return(list(grid_ms = grid_ms, model_onSubTr=svmFit, pred_ts = pred))
}
grid_search_1fold_svm <- function(r, data, data_ts){
# Train one grid point's linear SVM on `data`, score the held-out `data_ts`,
# persist model + decision values, and report precision at `crit`.
#
# r: row index into the global `grid` (col 1 = cost, col 2 = class weight).
# data: training fold with factor `response` ('H'/'NH').
# data_ts: held-out fold with factor `response` ('H'/'NH').
# Globals used: grid, iter, traceFile, crit.
cost <- grid[r, 1]
wt <- grid[r, 2]
# class weights: positives ('H') get `wt`, negatives stay at 1
wts <- list( c("NH"=1,"H"=wt) )
resp_ts <- ifelse(data_ts$response=='H', 1, 0)
x_ts <- data_ts[, -match('response', names(data_ts))]
cat(file = traceFile, append=T, 'iter-', iter , ' wt-', wt, ' cost-', cost, ' start!\n')
start <- proc.time()
svmFit <- svm(
response~., data=data
#TrainData, TrainClasses
, cost=cost, class.weights = wts[[1]]
, type="C-classification", kernel='linear'
, scale=T
)
cat(file=traceFile, append=T, 'iter-', iter , ' wt-', wt, ' cost-', cost, ' svm training on subtrain end! '
, 'time used-', (proc.time()-start)[3]/60, 'min! '
, 'ending time-', date() ,'\n'
)
# NOTE(review): 'sclae' is a typo for 'scale'; predict.svm has no such
# argument, so it is silently ignored via `...`.
pred <- predict(svmFit, x_ts , sclae=T,decision.values = TRUE)
cat(file=traceFile, append=T,'iter-', iter , ' wt-', wt, ' cost-', cost, ' pred on left out fold end! '
, 'time used-', (proc.time()-start)[3]/60, 'min! '
, 'ending time-', date() ,'\n'
)
predscore <- attr(pred, "decision.values")
pred_model <- list(model_onSubTr=svmFit, pred_ts = predscore)
save(file=paste('pred_wt(', wt,')cost(', cost, ').RData', sep=''), pred_model)
ms <- createCurve(resp = resp_ts, pred=predscore, recall_tar = crit)
grid_ms = c(cost=cost, weight=wt, ms=ms)
cat(file=traceFile, append=T,'iter-', iter , ' wt-', wt, ' cost-', cost, ' ms-', ms, '\n')
return(list(grid_ms = grid_ms, model_onSubTr=svmFit, pred_ts = pred))
}
get_optimum_model_iter <- function(haeIdx_iter, nonhaeIdx_iter, foldid_hae_cv, trainIdxIterIDNonHAE, traceFile, grid, num_pros){
# Build one CV train/test split, preprocess it, and run the SVM
# (cost x weight) grid search over `grid` in parallel with snowfall.
#
# haeIdx_iter / nonhaeIdx_iter: HAE / non-HAE indices for this iteration.
# foldid_hae_cv, trainIdxIterIDNonHAE: fold assignments consumed by
#   create_tr_ts_forCVi.
# traceFile: log path; grid: data.frame of (cost, weight) rows;
# num_pros: number of worker processes.
# Returns a data.frame with one measurement row per grid point.
#
# NOTE(review): `i` (fold index), `iter` and `crit` are resolved from the
# calling environment -- confirm they are defined before this is called.
# was: create_tr_ts_forCVi(haeIdx_iter_forCV, nonhaeIdx_iter_forCV, ...) --
# those names are not defined here; the function's own arguments are intended
dataset_cvi <- create_tr_ts_forCVi(haeIdx_iter, nonhaeIdx_iter, foldid_hae_cv, trainIdxIterIDNonHAE, i)
#cv_training_data_lasso <- dataset_cvi[[2]]
#cv_test_data_lasso <- dataset_cvi[[1]]
cv_data <- dataset_cvi$cv_data_lasso
cv_tr_flag <- dataset_cvi$tr_flag
cv_data$response <- as.factor(ifelse(cv_data$response==1,'H', 'NH'))
cv_data$region <- as.numeric(as.factor(cv_data$region))
cv_data$gender <- as.numeric(as.factor(cv_data$gender))
cv_data_tr <- cv_data[cv_tr_flag,] #[1] 127032 242
cv_data_ts <- cv_data[!cv_tr_flag,] #[1] 31356 242
cv_resp_ts <- cv_data_ts$response
#generate dummy variables for categorical variables (i.e. gender and region)
cati_var <- c('gender', 'region')
# NOTE(review): `response` is not in the subset passed as `data`, so
# model.matrix will look it up in the calling environment -- likely intended
# data = cv_data_tr[, c('response', cati_var)]; confirm.
dummy_df <- model.matrix(response~., data=cv_data_tr[, match(cati_var, names(cv_data_tr))]
)
#scale the 4/5 folds data and save the mean and sd to apply on 1/5 fold scaling
conti_var <- c('age', 'lookback_days', grep('freq$', names(cv_data_tr), value=T))
mean_sd <- lapply(conti_var, function(v){
var <- cv_data_tr[, v]
var_scale <- scale(var)
mean <- mean(var)
sd <- sd(var)
return(c(mean=mean, sd=sd))
})
# was: name(cv_data_cv) / names(cv_data_cv) -- `name()` is not a function and
# `cv_data_cv` is undefined; both lookups are against cv_data_tr
conti_scale_df <- scale(cv_data_tr[, match(conti_var, names(cv_data_tr))])
cv_data_tr_scale <- cbind(cv_data_tr[, -match(c(conti_var, cati_var),names(cv_data_tr))], dummy_df, conti_scale_df)
# NOTE(review): cv_data_tr_scale and mean_sd are built but never used below;
# the grid search runs on the *unscaled* cv_data_tr -- confirm intent.
#num_pros <- Sys.getenv('NUMBER_OF_PROCESSORS')
sfInit(parallel=TRUE, cpus=num_pros, type='SOCK')
sfSource("D:\\Shire_project\\03_code\\Jie\\svm\\svm_subFunc_Dec03.R")
cat(file=traceFile, append=TRUE, 'iter:', iter, ' sfExport running!\n')
sfExport('cv_data_tr', 'crit', 'cv_data_ts'
, 'grid', 'iter', 'traceFile')
sfExport('createCurve', 'grid_search_1fold_svm')
#sfClusterEval(library("glmnet"))
sfClusterEval(library("e1071"))
sfClusterEval(library("ROCR"))
sfClusterEval(library("plyr"))
sfClusterEval(library("dplyr"))
cat(file=traceFile, append=TRUE, 'iter:', iter, ' parallele grid search running!\n')
ms_fromGrid <- sfClusterApplyLB(1:nrow(grid), grid_search_1fold_svm, cv_data_tr, cv_data_ts)
sfStop()
cat(file=traceFile, append=TRUE, 'iter:', iter, ' parallele grid search end!\n')
save(file='fullRecords.RData', ms_fromGrid)
ms_list <- lapply(ms_fromGrid, function(X)X[[1]])
ms_df <- ldply(ms_list, quickdf)
cat(file=traceFile, append=TRUE, 'iter:', iter, ' run over for initial grid search!\n')
return(ms_df)
}
grid_search_cv_svm <- function(r){
# Full k-fold CV for one (cost, weight) grid point: trains a linear SVM per
# fold on pre-normalized data loaded from disk, scores the held-out fold,
# persists models/predictions, and returns the fold-averaged measurement.
#
# r: row index into the global `grid` (col 1 = cost, col 2 = class weight).
# Globals used: grid, k.folds, path_sepData, iter, traceFile, crit.
# Each dataset_cv<i>.RData is expected to provide `cv_trTs_norm` with
# pre-scaled $tr and $ts data.frames.
cost <- grid[r, 1]
wt <- grid[r, 2]
wts <- list( c("NH"=1,"H"=wt) )
cat(file = traceFile, append=T, 'iter-', iter , ' wt-', wt, ' cost-', cost, ' start!\n')
start1 <- proc.time()
ms_onCV <- lapply(1:k.folds, function(i){
#load the cv data for cv i
load(paste(path_sepData, '\\dataset_cv', i, '.RData', sep=''))
cv_data_tr <- cv_trTs_norm$tr #[1] 127032 242
cv_data_tr$response <- as.factor(ifelse(cv_data_tr$response==1, 'H', 'NH'))
cv_data_ts <- cv_trTs_norm$ts #[1] 31356 242
# NOTE(review): the test response stays numeric 0/1 while training labels
# are recoded to 'H'/'NH' -- createCurve_v2 below receives the 0/1 form.
cv_resp_ts <- cv_data_ts$response
cv_x_ts <- cv_data_ts[, -match('response', names(cv_data_ts))]
cat(file=traceFile, append=T, 'iter-', iter , ' wt-', wt, ' cost-', cost, ' Fold-', i, ' svm training on subtrain start! \n')
# NOTE(review): `start` is set but the timing logs below use start1
start <- proc.time()
svmFit <- svm(
response~., data=cv_data_tr
#TrainData, TrainClasses
, cost=cost, class.weights = wts[[1]]
, type="C-classification", kernel='linear'
, scale=F
)
save(file=paste('svmModel_cost_', cost, '_wt_', wt, '_fold_', i, '.RData', sep=''), svmFit)
cat(file=traceFile, append=T, 'iter-', iter , ' wt-', wt, ' cost-', cost, ' Fold-', i, ' svm training on subtrain end! '
, 'time used-', (proc.time()-start1)[3]/60, 'min! '
, 'ending time-', date() ,'\n'
)
pred <- predict(svmFit, cv_x_ts , scale=F,decision.values = TRUE)
cat(file=traceFile, append=T,'iter-', iter , ' wt-', wt, ' cost-', cost, ' Fold-', i, ' pred on left out fold end! '
, 'time used-', (proc.time()-start1)[3]/60, 'min! '
, 'ending time-', date() ,'\n'
)
predscore <- attr(pred, "decision.values")
pred_model <- list(model_onSubTr=svmFit, resp_pred_ts = data.frame(resp=cv_resp_ts, pred=predscore))
save(file=paste('pred_wt(', wt,')cost(', cost, ')Fold',i, '.RData', sep=''), pred_model)
ms <- createCurve_v2(resp = cv_resp_ts, pred=predscore, recall_tar = crit)
grid_ms = c(cost=cost, weight=wt, fold=i, ms=ms)
cat(file=traceFile, append=T,'iter-', iter , ' wt-', wt, ' cost-', cost, ' Fold-', i, ' ms-', ms, '\n')
return( grid_ms)
})
# stack per-fold rows and average the measurement over folds
ms_onCV_df <- ldply(ms_onCV, quickdf)
ms_avg <- aggregate(ms_onCV_df$ms, by=list(cost=ms_onCV_df$cost, wt=ms_onCV_df$weight), mean, na.rm=T)
names(ms_avg) <- c('cost', 'wt', 'ms')
cat(file=traceFile, append=T,'iter-', iter , ' wt-', wt, ' cost-', cost, ' ms_avg-', ms_avg[1, 3], '\n')
return(ms_avg)
}
par_onCV <- function(i){
# Worker for one CV fold: train a linear or RBF SVM (per the global `kernel`)
# on the fold's pre-normalized training data, score the held-out fold, save
# model + predictions, and return the measurement row.
#
# i: fold index (used to pick the dataset_cv<i>.RData file).
# Globals (exported via snowfall): cost, wt, wts, gamma, kernel, clsMethod,
# iter, traceFile, crit, path_sepData.
# Each RData file is expected to provide `cv_trTs_norm` ($tr / $ts).
#load the cv data for cv i
load(paste(path_sepData, '\\dataset_cv', i, '.RData', sep=''))
cv_data_tr <- cv_trTs_norm$tr #[1] 127032 242
cv_data_tr$response <- as.factor(ifelse(cv_data_tr$response==1, 'H', 'NH'))
cv_data_ts <- cv_trTs_norm$ts #[1] 31356 242
cv_resp_ts <- cv_data_ts$response
cv_x_ts <- cv_data_ts[, -match('response', names(cv_data_ts))]
cat(file=traceFile, append=T, 'iter-', iter , ' wt-', wt, ' cost-', cost, ' gamma-', gamma,' Fold-', i, ' svm training on subtrain start! \n')
start1 <- proc.time()
if(kernel=='lin'){
svmFit <- svm(
response~., data=cv_data_tr
#TrainData, TrainClasses
, cost=cost, class.weights = wts[[1]]
, type=clsMethod, kernel='linear'
, scale=F
)
}else if(kernel=='rbf'){
# RBF branch additionally passes the gamma hyper-parameter
svmFit <- svm(
response~., data=cv_data_tr
#TrainData, TrainClasses
, cost=cost, class.weights = wts[[1]]
, gamma=gamma
, type=clsMethod, kernel='radial'
, scale=F
)
}
#load(file=paste('svmModel_cost_', cost, '_wt_', wt, '_fold_', i, '.RData', sep=''))
cat(file=traceFile, append=T, 'iter-', iter , ' wt-', wt, ' cost-', cost,' gamma-', gamma, ' Fold-', i, ' svm training on subtrain end! '
, 'time used-', (proc.time()-start1)[3]/60, 'min! '
, 'ending time-', date() ,'\n'
)
pred <- predict(svmFit, cv_x_ts , scale=F,decision.values = TRUE)
cat(file=traceFile, append=T,'iter-', iter , ' wt-', wt, ' cost-', cost, ' gamma-', gamma, ' Fold-', i, ' pred on left out fold end! '
, 'time used-', (proc.time()-start1)[3]/60, 'min! '
, 'ending time-', date() ,'\n'
)
predscore <- attr(pred, "decision.values")
pred_model <- list(model_onSubTr=svmFit, resp_pred_ts = data.frame(resp=cv_resp_ts, pred=predscore))
save(file=paste('pred_wt(', wt,')cost(', cost, ')gm(', gamma, ')Fold',i, '.RData', sep=''), pred_model)
ms <- createCurve(resp = cv_resp_ts, pred=predscore, recall_tar = crit)
# the result row's shape depends on the kernel (gamma column only for rbf)
if(kernel=='lin'){
grid_ms = c(cost=cost, weight=wt, fold=i, ms=ms)
}else if(kernel=='rbf'){
grid_ms = c(cost=cost, gamma=gamma, weight=wt, fold=i, ms=ms)
}
cat(file=traceFile, append=T,'iter-', iter , ' wt-', wt, ' cost-', cost,' gamma-', gamma, ' Fold-', i, ' ms-', ms, '\n')
return( grid_ms)
}
# Evaluate one row `r` of the hyper-parameter `grid` by k-fold CV, with
# the folds themselves run in parallel (nested snowfall cluster, one CPU
# per fold, each executing par_onCV). Returns the fold-averaged metric
# per parameter combination.
#
# NOTE(review): free variables `grid`, `kernel`, `iter`, `traceFile`,
# `crit`, `k.folds`, `path_sepData`, `ONE`, `clsMethod` must be in scope.
# NOTE(review): grid column layout differs by kernel — lin: (cost, wt);
# rbf: (cost, gamma, wt). For 'lin' gamma is the string 'NA' (log only).
grid_search_cv_svm_par <- function(r){
if(kernel=='lin'){
cost <- grid[r, 1]
wt <- grid[r, 2]
gamma <- 'NA'
}else if(kernel=='rbf'){
cost <- grid[r, 1]
gamma <- grid[r, 2]
wt <- grid[r, 3]
}
# class weights: penalise misclassifying the rare 'H' class `wt` times more
wts <- list( c("NH"=1,"H"=wt) )
cat(file = traceFile, append=T, 'iter-', iter , ' wt-', wt, ' cost-', cost, ' gamma-', gamma, ' start!\n')
start1 <- proc.time()
#num_pros <- Sys.getenv('NUMBER_OF_PROCESSORS')
# one worker per CV fold
sfInit(parallel=TRUE, cpus=k.folds, type='SOCK')
sfSource("D:\\Shire_project\\03_code\\Jie\\svm\\RBF\\svm_subFunc_Dec03_normByRow_rbf.R")
cat(file=traceFile, append=TRUE, 'iter:', iter, ' cost-', cost,' gamma-', gamma, ' wt-', wt, ' sfExport running!\n')
sfExport('grid', 'cost', 'wt', 'wts', 'iter', 'traceFile', 'crit', 'k.folds', 'path_sepData'
, 'ONE', 'kernel', 'clsMethod', 'gamma')
# NOTE(review): par_onCV calls createCurve(), which is not exported here;
# it is only available on workers via the sfSource() above — confirm.
sfExport('createCurve_v2', 'par_onCV')
#sfClusterEval(library("glmnet"))
sfClusterEval(library("e1071"))
sfClusterEval(library("ROCR"))
sfClusterEval(library("plyr"))
sfClusterEval(library("dplyr"))
cat(file=traceFile, append=TRUE, 'iter:', iter, ' cost-', cost, 'wt-', wt, ' gamma-', gamma, ' parallele onCV running start!\n')
ms_onCV <- sfClusterApplyLB(1:k.folds, par_onCV)
sfStop()
cat(file=traceFile, append=TRUE, 'iter:', iter, ' cost-', cost, 'wt-', wt,' gamma-', gamma, ' parallele onCV running end!', 'usedTime-', (proc.time()-start1)[3]/60, 'min\n')
# stack the per-fold metric vectors and average over folds
ms_onCV_df <- ldply(ms_onCV, quickdf)
if(kernel=='lin'){
ms_avg <- aggregate(ms_onCV_df$ms, by=list(cost=ms_onCV_df$cost, wt=ms_onCV_df$weight), mean, na.rm=T)
names(ms_avg) <- c('cost', 'wt', 'ms')
}else if(kernel=='rbf'){
ms_avg <- aggregate(ms_onCV_df$ms, by=list(cost=ms_onCV_df$cost, gamma=ms_onCV_df$gamma, wt=ms_onCV_df$weight), mean, na.rm=T)
names(ms_avg) <- c('cost', 'gamma', 'wt', 'ms')
}
cat(file=traceFile, append=T,'iter-', iter , ' wt-', wt, ' cost-', cost,' gamma-', gamma, ' ms_avg-', ms_avg[1, ncol(ms_avg)], '\n')
return(ms_avg)
}
# Full model-selection pipeline for one iteration: run the hyper-parameter
# grid search in parallel (each grid row spawns its own per-fold cluster
# via grid_search_cv_svm_par), pick the best (cost[, gamma], weight)
# combination, refit on the full training set and score the held-out test
# set.
#
# traceFile    : progress log, appended to throughout
# path_sepData : directory holding trTs_rn.RData (pre-normalised split)
# num_pros     : number of SOCK workers for the outer grid loop
#
# Side effects: writes fullRecords.RData, svm_model_on_fullTrain.RData,
# pred_on_fullTest.RData and the Model&Pred_*.RData bundle.
# NOTE(review): depends on free variables `grid`, `iter`, `crit`,
# `k.folds`, `ONE`, `kernel`, `clsMethod`.
get_optimum_model_iter_withCV <- function(traceFile, path_sepData, num_pros){
#num_pros <- Sys.getenv('NUMBER_OF_PROCESSORS')
sfInit(parallel=TRUE, cpus=num_pros, type='SOCK')
sfSource("D:\\Shire_project\\03_code\\Jie\\svm\\RBF\\svm_subFunc_Dec03_normByRow_rbf.R")
cat(file=traceFile, append=TRUE, 'iter:', iter, ' sfExport running!\n')
sfExport('grid', 'iter', 'traceFile', 'crit', 'k.folds', 'path_sepData'
, 'ONE', 'kernel', 'clsMethod')
sfExport('createCurve_v2', 'grid_search_cv_svm_par')
#sfClusterEval(library("glmnet"))
sfClusterEval(library("e1071"))
sfClusterEval(library("ROCR"))
sfClusterEval(library("plyr"))
sfClusterEval(library("dplyr"))
# snowfall needed on workers because grid_search_cv_svm_par starts a
# nested cluster of its own
sfClusterEval(library('snowfall'))
cat(file=traceFile, append=TRUE, 'iter:', iter, ' parallele grid search running!\n')
ms_allGrid <- sfClusterApplyLB(1:nrow(grid), grid_search_cv_svm_par)
sfStop()
cat(file=traceFile, append=TRUE, 'iter:', iter, ' parallele grid search end!\n')
save(file='fullRecords.RData', ms_allGrid)
ms_allGrid <- ldply(ms_allGrid, quickdf)
#choose the best parameter
opt_lines <- ms_allGrid[ms_allGrid$ms==max(ms_allGrid$ms, na.rm = T),]
# random tie-break; rep(.., 2) guards against sample(n) sampling 1:n when
# only a single row ties
opt_line <- opt_lines[sample(rep(1:nrow(opt_lines), 2), 1), ]
opt_cost <- opt_line$cost
opt_wt <- opt_line$wt
max_ms <- opt_line$ms
# for the linear kernel opt_gamma is the string 'NA' — it is only used in
# log lines and output file names on that branch
if(kernel=='rbf'){
opt_gamma <- opt_line$gamma
}else if(kernel=='lin'){
opt_gamma <- 'NA'
}
cat(file=traceFile, append=TRUE, 'iter:', iter, ' run over for all grid search!\n'
,'opt_cost-',opt_cost, ' opt_gamma-',opt_gamma , ' opt_wt-', opt_wt, ' max_ms-', max_ms, '\n'
)
#applythe optimal parameter on the test data
cat(file=traceFile, append=T, 'iter-', iter , ' opt_wt-', opt_wt
, ' opt_cost-', opt_cost, ' opt_gamma-',opt_gamma ,' svm training on subtrain start! \n')
start1 <- proc.time()
# load() brings `trTs_norm` (list with $tr / $ts) into scope
load(paste(path_sepData, '\\trTs_rn.RData', sep=''))
train_data_iter <- trTs_norm$tr
test_data_iter <- trTs_norm$ts
rm(trTs_norm)
train_data_iter$response <- ifelse(train_data_iter$response==1, 'H', 'NH')
resp_ts2 <- test_data_iter$response
x_ts2 <- test_data_iter[, -match('response',names(test_data_iter))]
rm(test_data_iter)
wts <- list( c("NH"=1,"H"=opt_wt) )
if(kernel=='lin'){
svmFit <- svm(
response~., data=train_data_iter
#TrainData, TrainClasses
, cost=opt_cost, class.weights = wts[[1]]
, type=clsMethod, kernel='linear'
, scale=F
)
}else if(kernel=='rbf'){
svmFit <- svm(
response~., data=train_data_iter
#TrainData, TrainClasses
, cost=opt_cost, class.weights = wts[[1]]
, gamma=opt_gamma
, type=clsMethod, kernel='radial'
, scale=F
)
}
save(file='svm_model_on_fullTrain.RData', svmFit)
rm(train_data_iter)
cat(file=traceFile, append=T, 'iter-', iter , ' opt_wt-', opt_wt
, ' opt_cost-', opt_cost, ' opt_gamma-',opt_gamma ,' svm training on training end! '
, 'time used-', (proc.time()-start1)[3]/60, 'min! '
, 'ending time-', date() ,'\n'
)
pred <- predict(svmFit, x_ts2 , scale=F,decision.values = TRUE)
save(file='pred_on_fullTest.RData', pred)
rm(x_ts2)
cat(file=traceFile, append=T,'iter-', iter , ' opt_wt-', opt_wt
, ' opt_cost-', opt_cost, ' opt_gamma-',opt_gamma ,' pred on left out test end! '
, 'time used-', (proc.time()-start1)[3]/60, 'min! '
, 'ending time-', date() ,'\n'
)
# decision values serve as the continuous scores for the PR metric
predscore <- attr(pred, "decision.values")
pred_model <- list(model_onFullTr=svmFit
, resp_pred_ts = data.frame(resp=resp_ts2,pred=predscore))
save(file=paste('Model&Pred_optWt(', opt_wt,')optCost(', opt_cost, ')optGm(', opt_gamma,').RData', sep='')
, pred_model)
ms <- createCurve_v2(resp = resp_ts2, pred=predscore, recall_tar = crit)
if(kernel=='lin'){
grid_ms = c(opt_cost=opt_cost, opt_wt=opt_wt, max_ms=ms)
}else if(kernel=='rbf'){
grid_ms = c(opt_cost=opt_cost, opt_gamma=opt_gamma, opt_wt=opt_wt, max_ms=ms)
}
result_list <- list(resp_pred =data.frame(resp=resp_ts2,pred=predscore)
, ms=grid_ms)
return(result_list)
}
# Linear-kernel-only variant of get_optimum_model_iter_withCV: run the
# (cost, weight) grid search over pre-built CV folds in parallel, pick
# the best combination, refit on the full training set and score the
# held-out test set.
#
# traceFile    : progress log, appended to throughout
# path_sepData : directory holding trTs_rn.RData (pre-normalised split)
# num_pros     : number of SOCK workers for the grid search
#
# Side effects: writes fullRecords.RData and the Model&Pred_*.RData
# bundle. Depends on free variables `grid`, `iter`, `crit`, `k.folds`
# and the worker function `grid_search_cv_svm_temp`.
# Returns list(resp_pred = data.frame(resp, pred), ms = named vector).
get_optimum_model_iter_withCV_temp <- function(traceFile, path_sepData, num_pros){
  #num_pros <- Sys.getenv('NUMBER_OF_PROCESSORS')
  sfInit(parallel=TRUE, cpus=num_pros, type='SOCK')
  sfSource("D:\\Shire_project\\03_code\\Jie\\svm\\svm_subFunc_Dec03_normByRow.R")
  cat(file=traceFile, append=TRUE, 'iter:', iter, ' sfExport running!\n')
  sfExport('grid', 'iter', 'traceFile', 'crit', 'k.folds', 'path_sepData')
  sfExport('createCurve', 'grid_search_cv_svm_temp')
  #sfClusterEval(library("glmnet"))
  sfClusterEval(library("e1071"))
  sfClusterEval(library("ROCR"))
  sfClusterEval(library("plyr"))
  sfClusterEval(library("dplyr"))
  cat(file=traceFile, append=TRUE, 'iter:', iter, ' parallele grid search running!\n')
  ms_allGrid <- sfClusterApplyLB(1:nrow(grid), grid_search_cv_svm_temp)
  sfStop()
  cat(file=traceFile, append=TRUE, 'iter:', iter, ' parallele grid search end!\n')
  save(file='fullRecords.RData', ms_allGrid)
  ms_allGrid <- ldply(ms_allGrid, quickdf)
  # Choose the best parameter combination; ties are broken at random.
  # rep(.., 2) guards against sample(n) expanding to 1:n when one row ties.
  opt_lines <- ms_allGrid[ms_allGrid$ms==max(ms_allGrid$ms, na.rm = T),]
  opt_line <- opt_lines[sample(rep(1:nrow(opt_lines), 2), 1), ]
  opt_cost <- opt_line$cost
  opt_wt <- opt_line$wt
  max_ms <- opt_line$ms
  cat(file=traceFile, append=TRUE, 'iter:', iter, ' run over for all grid search!\n'
      ,'opt_cost-',opt_cost, ' opt_wt-', opt_wt, ' max_ms-', max_ms, '\n'
  )
  # Refit with the optimal parameters on the full training data and
  # evaluate on the held-out test set.
  cat(file=traceFile, append=T, 'iter-', iter , ' opt_wt-', opt_wt
      , ' opt_cost-', opt_cost, ' svm training on subtrain start! \n')
  start1 <- proc.time()
  # load() brings `trTs_norm` (list with $tr / $ts) into scope
  load(paste(path_sepData, '\\trTs_rn.RData', sep=''))
  train_data_iter <- trTs_norm$tr
  test_data_iter <- trTs_norm$ts
  rm(trTs_norm)
  train_data_iter$response <- ifelse(train_data_iter$response==1, 'H', 'NH')
  resp_ts2 <- test_data_iter$response
  x_ts2 <- test_data_iter[, -match('response',names(test_data_iter))]
  rm(test_data_iter)
  wts <- list( c("NH"=1,"H"=opt_wt) )
  svmFit <- svm(
    response~., data=train_data_iter
    #TrainData, TrainClasses
    , cost=opt_cost, class.weights = wts[[1]]
    , type="C-classification", kernel='linear'
    , scale=F
  )
  rm(train_data_iter)
  cat(file=traceFile, append=T, 'iter-', iter , ' opt_wt-', opt_wt
      , ' opt_cost-', opt_cost, ' svm training on training end! '
      , 'time used-', (proc.time()-start1)[3]/60, 'min! '
      , 'ending time-', date() ,'\n'
  )
  # BUGFIX: was `predict(..., sclae=T, ...)` — a misspelling of `scale`
  # that predict.svm silently swallowed via `...`; predict applies the
  # model's stored scaling itself, so the argument is simply dropped.
  pred <- predict(svmFit, x_ts2, decision.values = TRUE)
  rm(x_ts2)
  cat(file=traceFile, append=T,'iter-', iter , ' opt_wt-', opt_wt
      , ' opt_cost-', opt_cost, ' pred on left out test end! '
      , 'time used-', (proc.time()-start1)[3]/60, 'min! '
      , 'ending time-', date() ,'\n'
  )
  # decision values serve as the continuous scores for the PR metric
  predscore <- attr(pred, "decision.values")
  pred_model <- list(model_onFullTr=svmFit
                     , resp_pred_ts = data.frame(resp=resp_ts2,pred=predscore))
  save(file=paste('Model&Pred_optWt(', opt_wt,')optCost(', opt_cost, ').RData', sep='')
       , pred_model)
  ms <- createCurve(resp = resp_ts2, pred=predscore, recall_tar = crit)
  # build the metric vector once and reuse it (was duplicated inline)
  grid_ms <- c(opt_cost=opt_cost, opt_wt=opt_wt, max_ms=ms)
  result_list <- list(resp_pred =data.frame(resp=resp_ts2,pred=predscore)
                      , ms=grid_ms)
  return(result_list)
}
# Build the training/test split for CV fold `i` within one iteration.
#
# haeIdx_iter          : HAE row indices in play for this iteration
# nonhaeIdx_iter       : matched non-HAE row indices for this iteration
# foldid_hae_cv        : list; element i gives the positions (within
#                        haeIdx_iter) of the HAE ids held out in fold i
# trainIdxIterIDNonHAE : list indexed by HAE id; column 1 of each element
#                        holds that id's matched non-HAE row indices
# i                    : fold number
#
# Reads `dat_hae_1111_rf` / `dat_nonhae_1111_rf` from the enclosing
# environment; column 1 of each is dropped (assumed to be an id column —
# confirm).
# Returns list(cv_data_lasso = training rows followed by test rows,
#              tr_flag = logical vector, TRUE for the training rows).
create_tr_ts_forCVi <- function(haeIdx_iter, nonhaeIdx_iter, foldid_hae_cv, trainIdxIterIDNonHAE, i){
  haeIdx_iter_cviTs <- haeIdx_iter[foldid_hae_cv[[i]]]
  # non-HAE test rows are those matched to the held-out HAE ids
  nonhaeIdx_iter_cviTs <- unlist(lapply(trainIdxIterIDNonHAE[haeIdx_iter_cviTs], function(x)x[, 1]))
  haeIdx_iter_cviTr <- setdiff(haeIdx_iter, haeIdx_iter_cviTs)
  nonhaeIdx_iter_cviTr <- setdiff(nonhaeIdx_iter, nonhaeIdx_iter_cviTs)
  # (removed dead locals idx_iter_cviTs / idx_iter_cviTr — they were
  # computed but never used)
  HAE_cviTs <- dat_hae_1111_rf[haeIdx_iter_cviTs, -1]
  HAE_cviTr <- dat_hae_1111_rf[haeIdx_iter_cviTr, -1]
  NonHae_cviTs <- dat_nonhae_1111_rf[nonhaeIdx_iter_cviTs, -1]
  NonHae_cviTr <- dat_nonhae_1111_rf[nonhaeIdx_iter_cviTr, -1]
  cv_test_data_lasso <- as.data.frame(rbind(HAE_cviTs, NonHae_cviTs))
  cv_training_data_lasso <- as.data.frame(rbind(HAE_cviTr, NonHae_cviTr))
  cv_data_lasso <- rbind(cv_training_data_lasso, cv_test_data_lasso)
  # training rows come first, so the flag is TRUE then FALSE in blocks
  tr_flag <- rep(c(TRUE, FALSE), c(nrow(cv_training_data_lasso), nrow(cv_test_data_lasso)))
  return(list(cv_data_lasso=cv_data_lasso, tr_flag=tr_flag))
}
# caret-style summaryFunction returning detection count, AUPR, ROC AUC,
# sensitivity, specificity and PPV for a two-class problem.
#
# data  : resample data.frame with factor columns `obs` and `pred` plus a
#         per-class probability column named after lev[1]
# lev   : class levels; lev[1] is treated as the event ('H') class
# model : unused (kept to satisfy caret's summaryFunction signature)
get_auprSummary <- function (data, lev = NULL, model = NULL) {
  if (length(levels(data$obs)) > 2)
    stop(paste("Your outcome has", length(levels(data$obs)),
               "levels. The twoClassSummary() function isn't appropriate."))
  library("pROC")
  if (!all(levels(data[, "pred"]) == levels(data[, "obs"])))
    stop("levels of observed and predicted data do not match")
  rocObject <- try(pROC::roc(data$obs, data[, lev[1]]), silent = TRUE)
  # inherits() is the robust way to detect try() failure (the original
  # compared class(x)[1] against the string "try-error")
  rocAUC <- if (inherits(rocObject, "try-error"))
    NA
  else rocObject$auc
  library("PRROC")
  obs_ori <- ifelse(data$obs=='H', 1, 0)
  prObject <- try(PRROC::pr.curve(scores.class0 = data[, lev[1]],
                                  weights.class0 = obs_ori), silent = TRUE)
  prAUPR <- if (inherits(prObject, "try-error"))
    NA
  else prObject$auc.integral
  # NOTE(review): assumes a 2x2 obs-by-pred table and reads cell [2, 1]
  # as the detection count — confirm level ordering matches that intent.
  DR <- table(data[, 'obs'], data[, 'pred'])[2, 1]
  out <- c(DR, prAUPR, rocAUC, sensitivity(data[, "pred"], data[, "obs"], lev[1])
           , specificity(data[, "pred"], data[, "obs"], lev[2])
           , posPredValue(data[, "pred"], data[, "obs"], lev[1], na.rm=T)
  )
  names(out) <- c('DR', "AUPR","ROC", "Sens", "Spec", "PPV")
  out
}
#create own functions to pass cost and weights to svm
# Build a custom caret model spec: start from the stock 'svmLinear2'
# definition and graft on a tunable class-weight parameter next to cost,
# so train() can grid-search both.
get_svmWtModel <- function(){
  spec <- getModelInfo(model = 'svmLinear2', regex = FALSE)[[1]]
  # declare the two tunable parameters (cost, weights)
  spec$parameters <- data.frame(
    parameter = c('cost', 'weights'),
    class = c('numeric', 'numeric'),
    label = c('cost', 'weights')
  )
  # fit function: weight the first level of y by param$weights; a weight
  # of exactly 1 means "unweighted" and passes NULL to svm()
  spec$fit <- function(x, y, wts, param, lev, last, weights, classProbs, ...){
    cls_wts <- NULL
    if (param$weights != 1) {
      cls_wts <- setNames(c(param$weights, 1), levels(y))
    }
    svm(x = as.matrix(x), y = y,
        kernel = 'linear',
        cost = param$cost,
        class.weights = cls_wts,
        probability = classProbs, ...)
  }
  spec
}
| /codess/SVM/before_Jan01/RBF/svm_subFunc_Dec03_normByRow_rbf.R | no_license | jzhao0802/ShireHAE | R | false | false | 36,381 | r | #some functions
# Gather the non-HAE row indices (column 1) that were assigned to
# bootstrap iteration `iter` (column 2) across all index blocks.
#
# iter                 : iteration id to select
# trainIdxIterIDNonHAE : list of two-column matrices (index, iteration)
# Returns a plain numeric vector of the selected indices.
getNonHAEIdx_iter <- function(iter, trainIdxIterIDNonHAE){
picked <- vector("list", length(trainIdxIterIDNonHAE))
for (b in seq_along(trainIdxIterIDNonHAE)) {
block <- trainIdxIterIDNonHAE[[b]]
picked[[b]] <- block[block[, 2] == iter, 1]
}
#9860
unlist(picked)
}
# Precision at target recall levels, read off a bucketed recall-precision
# curve.
#
# resp       : binary ground truth (any labels ROCR::prediction accepts)
# pred       : continuous scores, same length as resp
# recall_tar : vector of recall levels at which precision is wanted
# Returns one precision value per entry of recall_tar.
createCurve <- function(resp, pred, recall_tar){
predobj <- prediction(pred, resp)
perf <- performance(predobj, 'prec', 'rec')
recall <- perf@x.values[[1]]
precision <- perf@y.values[[1]]
# (removed unused AUC computation)
rePrec <- cbind(recall, precision)
# smooth the raw curve: average precision within 0.005-wide recall buckets
bucket <- cut(recall, breaks=seq(0, 1, 0.005), include.lowest=T,right=F)
rec_prec_byBucket <- aggregate(rePrec, by=list(bucket), function(i)mean(i, na.rm=T))
# for each target recall, take the bucket whose mean recall is closest;
# [1] guards against distance ties, which previously made which() return
# several indices and the result stop being one value per target
# (createCurve_v2 applies the same guard)
temp4 <- unlist(lapply(recall_tar, function(X){
idx <- which(abs(rec_prec_byBucket[, 2]-X)==min(abs(rec_prec_byBucket[, 2]-X), na.rm=T))[1]
prec_sel <- rec_prec_byBucket[idx, 3]
return(prec_sel)
}))
return(temp4)
}
# Precision at target recall levels, read off the RAW (unbucketed)
# recall-precision curve — unlike createCurve, no smoothing is applied;
# the precision at the closest raw recall point is returned.
#
# resp       : binary ground truth (any labels ROCR::prediction accepts)
# pred       : continuous scores, same length as resp
# recall_tar : vector of recall levels at which precision is wanted
# Returns one precision value per entry of recall_tar.
createCurve_v2 <- function(resp, pred, recall_tar){
predobj <- prediction(pred, resp)
perf <- performance(predobj, 'prec', 'rec')
recall <- perf@x.values[[1]]
precision <- perf@y.values[[1]]
# (removed unused AUC and bucket-aggregation computations — they were
# calculated and then never referenced)
rePrec <- cbind(recall, precision)
# closest raw recall point wins; [1] breaks distance ties deterministically
temp4 <- unlist(lapply(recall_tar, function(X){
idx <- which(abs(rePrec[, 1]-X)==min(abs(rePrec[, 1]-X), na.rm=T))[1]
prec_sel <- rePrec[idx, 2]
return(prec_sel)
}))
return(temp4)
}
# Per-iteration test-set evaluation: for each row of `pred` (one set of
# scores per iteration) build a bucketed recall-precision curve, dump it
# to a sheet of Curve_sepIter.xlsx, then average AUC and
# precision-at-target-recall across iterations.
#
# pred       : matrix, one row of test-set scores per iteration
# response   : test-set labels shared by every row of `pred`
# recall_tar : recall levels at which precision is reported
# Returns a named vector: auc plus one PPV entry per recall target.
msOnTest <- function(pred, response, recall_tar){
temp3 <- lapply(seq_len(nrow(pred)), function(i){
predobj <- prediction(pred[i,], response)
perf <- performance(predobj, 'prec', 'rec')
recall <- perf@x.values[[1]]
precision <- perf@y.values[[1]]
auc <- performance(predobj, 'auc')@y.values[[1]]
bucket <- cut(recall, breaks=seq(0, 1, 0.005), include.lowest=T,right=F)
rec_prec <- cbind(recall, precision)
rec_prec_byBucket <- aggregate(rec_prec, by=list(bucket), function(i)mean(i, na.rm=T))
# BUGFIX: was write.xlsx(rec_prec_result, ...) — `rec_prec_result`
# was never defined; the bucketed curve is what gets written.
write.xlsx(rec_prec_byBucket, paste('Curve_sepIter.xlsx', sep=''),
sheetName=paste("Iter_",i), row.names=F, append=T)
temp4 <- unlist(lapply(recall_tar, function(X){
idx <- which(abs(rec_prec_byBucket[, 2]-X)==min(abs(rec_prec_byBucket[, 2]-X), na.rm=T))
prec_sel <- rec_prec_byBucket[idx, 3]
return(prec_sel)
}))
return(list(auc=auc, rec_prec_byBucket=rec_prec_byBucket, ppv=temp4))
})
# BUGFIX: was hard-coded 1:10 — now follows the actual iteration count.
auc_mean <- mean(unlist(lapply(seq_along(temp3), function(X)temp3[[X]][[1]])), na.rm=T)
ppv_list <- lapply(temp3, function(X){
return(X[[3]])
})
# BUGFIX: was ldply(ppv_mean, quickdf) — `ppv_mean` was used before it
# was defined; the per-iteration list is what should be stacked.
ppv_df <- ldply(ppv_list, quickdf)
ppv_mean <- apply(ppv_df, 2, mean, na.rm=T)
ms <- c(auc_mean, ppv_mean)
names(ms) <- c('auc', paste("PPV(recall=", recall_tar,')', sep=''))
return(ms)
}
# Test-set evaluation on the ROW-AVERAGED scores: collapse `pred` with
# apply(.., 1, mean), build one recall-precision curve, write it to a PDF,
# and return AUC, AUPR and precision at the target recalls plus the
# bucketed curve.
#
# NOTE(review): apply(pred, 1, mean) averages over COLUMNS for each row,
# yielding one value per row of `pred`; for prediction() to work this
# requires rows = observations and columns = iterations — the transpose
# of the layout msOnTest uses (pred[i,] scored against response). Confirm
# the intended orientation of `pred` before reuse.
# Side effects: writes 'recall-precision curve on test.pdf'.
msOnTest_dong <- function(pred, response, recall_tar){
pred <- apply(pred, 1, mean, na.rm=T)
predobj <- prediction(pred, response)
#add plot
perf <- performance(predobj, 'prec', 'rec') # added by jie for recall-precision plot.
recall <- perf@x.values[[1]]
precision <- perf@y.values[[1]]
auc <- performance(predobj, 'auc')@y.values[[1]]
rec_prec <- data.frame(recall=recall, precision=precision)
# AUPR via trapezoidal integration after dropping NA curve points
rec_prec_omitMiss <- rec_prec[complete.cases(rec_prec),]
aupr <- trapz(rec_prec_omitMiss$recall, rec_prec_omitMiss$precision)
bucket <- cut(recall, breaks=seq(0, 1, 0.005), include.lowest=T,right=F)
rec_prec_byBucket <- aggregate(rec_prec, by=list(bucket), function(i)mean(i, na.rm=T))
#write.csv(rec_prec_byBucket, paste('Curve_dong.csv', sep=''),
# row.names=F, quote=T)
#plot
pdf(file=paste('recall-precision curve on test.pdf', sep=''))
#plot(recall, precision, type='l', main=paste('recall-precision curve'))
plot(perf)
dev.off()
##in simulation
# precision at each target recall, read from the bucketed curve
temp4 <- unlist(lapply(recall_tar, function(X){
idx <- which(abs(rec_prec_byBucket[, 2]-X)==min(abs(rec_prec_byBucket[, 2]-X), na.rm=T))
prec_sel <- rec_prec_byBucket[idx, 3]
return(prec_sel)
}))
##end
ms <- c(auc, aupr, temp4)
names(ms) <- c('auc',"aupr", paste("PPV(recall=", recall_tar,')', sep=''))
return(list(ms=ms, curve=rec_prec_byBucket))
}
# Test-set evaluation for a single score vector: build the sensitivity/PPV
# curve, write it to a PDF, and return AUC, AUPR, precision at the target
# recalls (read from the RAW curve, first closest point), plus both the
# bucketed and raw curves.
#
# NOTE(review): uses performance(.., 'ppv', 'sens') whereas the sibling
# msOnTest_sep uses ('prec', 'rec'); in ROCR these are the same pair of
# measures under different names — confirm that the intended difference
# from msOnTest_sep is only the raw-curve (vs bucketed) lookup.
# Side effects: writes 'recall-precision curve on test.pdf'.
msOnTest_sep_v2 <- function(pred, response, recall_tar){
#pred <- apply(pred, 1, mean, na.rm=T)
predobj <- prediction(pred, response)
#add plot
perf <- performance(predobj, 'ppv', 'sens') # added by jie for recall-precision plot.
recall <- perf@x.values[[1]]
precision <- perf@y.values[[1]]
auc <- performance(predobj, 'auc')@y.values[[1]]
rec_prec <- data.frame(recall=recall, precision=precision)
# AUPR via trapezoidal integration after dropping NA curve points
rec_prec_omitMiss <- rec_prec[complete.cases(rec_prec),]
aupr <- trapz(rec_prec_omitMiss$recall, rec_prec_omitMiss$precision)
bucket <- cut(recall, breaks=seq(0, 1, 0.01), include.lowest=T,right=F)
rec_prec_byBucket <- aggregate(rec_prec, by=list(bucket), function(i)mean(i, na.rm=T))
#write.csv(rec_prec_byBucket, paste('Curve_dong.csv', sep=''),
# row.names=F, quote=T)
#plot
pdf(file=paste('recall-precision curve on test.pdf', sep=''))
#plot(recall, precision, type='l', main=paste('recall-precision curve'))
plot(perf)
dev.off()
##in simulation
# precision at each target recall, read from the RAW curve; [1] breaks
# distance ties deterministically
temp4 <- unlist(lapply(recall_tar, function(X){
#idx <- sample(rep(which(abs(rec_prec[, 1]-X)==min(abs(rec_prec[, 1]-X), na.rm=T)), 2), 1)
idx=which(abs(rec_prec[, 1]-X)==min(abs(rec_prec[, 1]-X), na.rm=T))[1]
prec_sel <- rec_prec[idx, 2]
return(prec_sel)
}))
##end
ms <- c(auc, aupr, temp4)
names(ms) <- c('auc',"aupr", paste("PPV(recall=", recall_tar,')', sep=''))
return(list(ms=ms, curve=rec_prec_byBucket, rec_prec=rec_prec))
}
# Test-set evaluation for a single score vector: build the
# recall-precision curve, write it to a PDF, and return AUC, AUPR and
# precision at the target recalls (read from the 0.005-bucketed curve),
# plus the bucketed curve itself.
#
# NOTE(review): unlike msOnTest_sep_v2, the recall lookup here has no [1]
# tie-guard, so a distance tie in which() can yield more than one
# precision value per target — confirm callers tolerate that.
# Side effects: writes 'recall-precision curve on test.pdf'.
msOnTest_sep <- function(pred, response, recall_tar){
#pred <- apply(pred, 1, mean, na.rm=T)
predobj <- prediction(pred, response)
#add plot
perf <- performance(predobj, 'prec', 'rec') # added by jie for recall-precision plot.
recall <- perf@x.values[[1]]
precision <- perf@y.values[[1]]
auc <- performance(predobj, 'auc')@y.values[[1]]
rec_prec <- data.frame(recall=recall, precision=precision)
# AUPR via trapezoidal integration after dropping NA curve points
rec_prec_omitMiss <- rec_prec[complete.cases(rec_prec),]
aupr <- trapz(rec_prec_omitMiss$recall, rec_prec_omitMiss$precision)
bucket <- cut(recall, breaks=seq(0, 1, 0.005), include.lowest=T,right=F)
rec_prec_byBucket <- aggregate(rec_prec, by=list(bucket), function(i)mean(i, na.rm=T))
#write.csv(rec_prec_byBucket, paste('Curve_dong.csv', sep=''),
# row.names=F, quote=T)
#plot
pdf(file=paste('recall-precision curve on test.pdf', sep=''))
#plot(recall, precision, type='l', main=paste('recall-precision curve'))
plot(perf)
dev.off()
##in simulation
# precision at each target recall, read from the bucketed curve
temp4 <- unlist(lapply(recall_tar, function(X){
idx <- which(abs(rec_prec_byBucket[, 2]-X)==min(abs(rec_prec_byBucket[, 2]-X), na.rm=T))
prec_sel <- rec_prec_byBucket[idx, 3]
return(prec_sel)
}))
##end
ms <- c(auc, aupr, temp4)
names(ms) <- c('auc',"aupr", paste("PPV(recall=", recall_tar,')', sep=''))
return(list(ms=ms, curve=rec_prec_byBucket))
}
# Bucket the recall-precision curve into `n.bkt` equal-width recall bins
# and return the bucketed curve alongside AUC/AUPR.
#
# pred  : continuous scores
# resp  : binary ground truth (any labels ROCR::prediction accepts)
# n.bkt : number of recall buckets
# Returns list(bkt = bucketed curve data.frame,
#              ms  = c(auc = ..., aupr = ...)).
getBkt <- function(pred, resp, n.bkt){
predobj <- prediction(pred, resp)
perf <- performance(predobj, 'prec', 'rec')
recall <- perf@x.values[[1]]
precision <- perf@y.values[[1]]
rec_prec <- cbind(recall, precision)
auc <- performance(predobj, 'auc')@y.values[[1]]
# AUPR via trapezoidal integration after dropping NA curve points
rec_prec_omitMiss <- rec_prec[complete.cases(rec_prec),]
aupr <- trapz(rec_prec_omitMiss[, 1], rec_prec_omitMiss[, 2])
bucket <- cut(recall, breaks=seq(0, 1, 1/n.bkt), include.lowest=T,right=F)
rec_prec_byBucket <- aggregate(rec_prec, by=list(bucket), function(i)mean(i, na.rm=T))
# BUGFIX: was list(bkt <- ..., ms <- ...) — `<-` inside list() produced
# an UNNAMED list (so $bkt / $ms were NULL for callers) and leaked the
# temporaries into the function environment; `=` names the elements.
return(list(bkt = rec_prec_byBucket, ms = c(auc=auc, aupr=aupr)))
}
# For each CV fold, refit a weighted lasso-logistic model at the given
# lambda and return the held-out (response, prediction) pairs, one
# data.frame per fold — i.e. the raw material for an out-of-sample curve.
#
# NOTE(review): of the declared parameters only `wt` and `lmd` are used;
# `foldid`, `response` and `data_mtx` are ignored, and the body instead
# reads `k.folds`, `haeIdx_iter`, `nonhaeIdx_iter`, `foldid_hae_cv` and
# `trainIdxIterIDNonHAE` from the enclosing environment — confirm this
# function is still current.
# NOTE(review): create_tr_ts_forCVi returns list(cv_data_lasso, tr_flag),
# so dataset_cvi[[2]] is a logical flag vector, not a data.frame; taking
# train_cvi$response from it would yield NULL. This looks stale/broken —
# verify against the version of create_tr_ts_forCVi it was written for.
get_out_of_sample_curve <- function(wt, lmd, foldid, response,data_mtx){
curve_fromCV <- lapply(1:k.folds, function(i){
dataset_cvi <- create_tr_ts_forCVi(haeIdx_iter, nonhaeIdx_iter, foldid_hae_cv, trainIdxIterIDNonHAE, i)
train_cvi <- dataset_cvi[[2]]
test_cvi <- dataset_cvi[[1]]
cv_training_resp <- train_cvi$response
cv_training_matrix<- model.matrix(response~., data=train_cvi)[, -1]
cv_test_resp<- test_cvi$response # select 1 fold as test data
cv_test_matrix<- model.matrix(response~., data=test_cvi)[, -1]
# up-weight (or down-weight) the non-event class by `wt`
wt_vct <- ifelse(cv_training_resp==1, 1, wt)
fit_lasso<- glmnet(cv_training_matrix, cv_training_resp, weights=wt_vct,
lambda=lmd, family="binomial", alpha=1, standardize=F)
test_pred<- predict(fit_lasso, cv_test_matrix, type="response") #2713 599
curve <- data.frame(resp=cv_test_resp, pred=test_pred)
return(curve)
})
return(curve_fromCV)
}
# Evaluate one (weight, fold) cell of the global `grid` for the lasso
# model: fit glmnet over the whole lambda path on the in-fold data, score
# the left-out fold, and report the precision-at-recall metric for every
# lambda.
#
# r                   : row index into the global `grid` (col 1 = weight,
#                       col 2 = fold id)
# training_data_lasso : full training frame with a `response` column
# Free variables: grid, foldid, lambda_seq, crit.
# Returns c(wt, fold, ms...) with one ms entry per lambda.
grid_search_v2 <- function(r, training_data_lasso){
fold_sel <- grid[r, 2]
wt_sel <- grid[r, 1]
in_fold <- foldid != fold_sel
tr_set <- training_data_lasso[in_fold, ]
ts_set <- training_data_lasso[!in_fold, ]
tr_mtx <- model.matrix(response~., data=tr_set)[, -1]
ts_mtx <- model.matrix(response~., data=ts_set)[, -1]
# weight the non-event class (response != 1) by the candidate weight
fit_lasso <- glmnet(tr_mtx, tr_set$response,
lambda=lambda_seq, family="binomial", alpha=1,
weights = ifelse(tr_set$response==1, 1, wt_sel),
standardize=F)
ts_pred <- predict(fit_lasso, ts_mtx, type="response")
ms_perLambda <- apply(ts_pred, 2, function(p){createCurve(ts_set$response , p, crit)})
# pad with NA: small lambdas may not be reached on this fold
ms_perLambda <- c(ms_perLambda, rep(NA, length(lambda_seq) - length(ms_perLambda)))
c(wt=wt_sel, fold=fold_sel, ms=ms_perLambda)
}#end for grid
# Variant of grid_search_1fold_svm that REUSES a previously saved model
# (from 'pred_wt(..)cost(..).RData') instead of retraining: re-score the
# held-out data, save the refreshed predictions as '..._v2.RData' and
# return the grid metrics.
#
# r       : row of the global `grid` (col 1 = cost, col 2 = weight)
# data    : unused here — kept for signature parity with
#           grid_search_1fold_svm, which trains on it
# data_ts : held-out data with an 'H'/'NH' `response` column
# Free variables: grid, iter, traceFile, crit.
grid_search_1fold_svm_findPred <- function(r, data, data_ts){
cost <- grid[r, 1]
wt <- grid[r, 2]
# (removed unused local `wts` — no training happens in this variant)
resp_ts <- ifelse(data_ts$response=='H', 1, 0)
x_ts <- data_ts[, -match('response', names(data_ts))]
cat(file = traceFile, append=T, 'iter-', iter , ' wt-', wt, ' cost-', cost, ' start!\n')
start <- proc.time()
# load() brings `pred_model` (saved by grid_search_1fold_svm) into scope
load(paste('pred_wt(', wt,')cost(', cost, ').RData', sep=''))
svmFit <- pred_model$model_onSubTr
cat(file=traceFile, append=T, 'iter-', iter , ' wt-', wt, ' cost-', cost, ' svm training on subtrain loading end! '
, 'time used-', (proc.time()-start)[3]/60, 'min! '
, 'ending time-', date() ,'\n'
)
# BUGFIX: dropped the misspelled `sclae=T` argument (silently swallowed
# by `...`); predict.svm applies the model's stored scaling itself.
pred <- predict(svmFit, x_ts, decision.values = TRUE)
cat(file=traceFile, append=T,'iter-', iter , ' wt-', wt, ' cost-', cost, ' pred on left out fold end! '
, 'time used-', (proc.time()-start)[3]/60, 'min! '
, 'ending time-', date() ,'\n'
)
predscore <- attr(pred, "decision.values")
pred_model <- list(model_onSubTr=svmFit, pred_ts = predscore)
save(file=paste('pred_wt(', wt,')cost(', cost, ')_v2.RData', sep=''), pred_model)
ms <- createCurve(resp = resp_ts, pred=predscore, recall_tar = crit)
grid_ms = c(cost=cost, weight=wt, ms=ms)
cat(file=traceFile, append=T,'iter-', iter , ' wt-', wt, ' cost-', cost, ' ms-', ms, '\n')
return(list(grid_ms = grid_ms, model_onSubTr=svmFit, pred_ts = pred))
}
# Train a class-weighted linear SVM for one (cost, weight) grid cell on
# `data`, score the held-out `data_ts`, persist model + decision values
# to 'pred_wt(..)cost(..).RData', and return the grid metrics.
#
# r       : row of the global `grid` (col 1 = cost, col 2 = weight)
# data    : training data with an 'H'/'NH' factor `response`
# data_ts : held-out data with an 'H'/'NH' `response` column
# Free variables: grid, iter, traceFile, crit.
grid_search_1fold_svm <- function(r, data, data_ts){
cost <- grid[r, 1]
wt <- grid[r, 2]
# class weights: penalise misclassifying 'H' by factor `wt`
wts <- list( c("NH"=1,"H"=wt) )
resp_ts <- ifelse(data_ts$response=='H', 1, 0)
x_ts <- data_ts[, -match('response', names(data_ts))]
cat(file = traceFile, append=T, 'iter-', iter , ' wt-', wt, ' cost-', cost, ' start!\n')
start <- proc.time()
svmFit <- svm(
response~., data=data
#TrainData, TrainClasses
, cost=cost, class.weights = wts[[1]]
, type="C-classification", kernel='linear'
, scale=T
)
cat(file=traceFile, append=T, 'iter-', iter , ' wt-', wt, ' cost-', cost, ' svm training on subtrain end! '
, 'time used-', (proc.time()-start)[3]/60, 'min! '
, 'ending time-', date() ,'\n'
)
# BUGFIX: dropped the misspelled `sclae=T` argument (silently swallowed
# by `...`); predict.svm re-applies the scaling stored in the fitted
# model (scale=T above), so no predict-time argument is needed.
pred <- predict(svmFit, x_ts, decision.values = TRUE)
cat(file=traceFile, append=T,'iter-', iter , ' wt-', wt, ' cost-', cost, ' pred on left out fold end! '
, 'time used-', (proc.time()-start)[3]/60, 'min! '
, 'ending time-', date() ,'\n'
)
predscore <- attr(pred, "decision.values")
pred_model <- list(model_onSubTr=svmFit, pred_ts = predscore)
save(file=paste('pred_wt(', wt,')cost(', cost, ').RData', sep=''), pred_model)
ms <- createCurve(resp = resp_ts, pred=predscore, recall_tar = crit)
grid_ms = c(cost=cost, weight=wt, ms=ms)
cat(file=traceFile, append=T,'iter-', iter , ' wt-', wt, ' cost-', cost, ' ms-', ms, '\n')
return(list(grid_ms = grid_ms, model_onSubTr=svmFit, pred_ts = pred))
}
# Older model-selection routine: build one CV split, dummy-encode the
# categorical variables, scale the continuous ones, then grid-search SVM
# parameters in parallel on that single split.
#
# NOTE(review): this function appears stale/broken and superseded by
# get_optimum_model_iter_withCV — several names it uses do not exist:
#   * `haeIdx_iter_forCV` / `nonhaeIdx_iter_forCV` (the parameters are
#     haeIdx_iter / nonhaeIdx_iter) and `i` (never defined in this scope);
#   * `name(cv_data_cv)` — presumably meant `names(cv_data_tr)`;
#     `cv_data_cv` is not defined anywhere;
#   * `mean_sd` is computed but never applied to the test fold;
#   * `cv_data_tr_scale` is built but `cv_data_tr` (unscaled) is what is
#     exported to the workers.
# Verify before calling; left byte-identical here.
get_optimum_model_iter <- function(haeIdx_iter, nonhaeIdx_iter, foldid_hae_cv, trainIdxIterIDNonHAE, traceFile, grid, num_pros){
dataset_cvi <- create_tr_ts_forCVi(haeIdx_iter_forCV, nonhaeIdx_iter_forCV, foldid_hae_cv, trainIdxIterIDNonHAE, i)
#cv_training_data_lasso <- dataset_cvi[[2]]
#cv_test_data_lasso <- dataset_cvi[[1]]
cv_data <- dataset_cvi$cv_data_lasso
cv_tr_flag <- dataset_cvi$tr_flag
cv_data$response <- as.factor(ifelse(cv_data$response==1,'H', 'NH'))
cv_data$region <- as.numeric(as.factor(cv_data$region))
cv_data$gender <- as.numeric(as.factor(cv_data$gender))
cv_data_tr <- cv_data[cv_tr_flag,] #[1] 127032 242
cv_data_ts <- cv_data[!cv_tr_flag,] #[1] 31356 242
cv_resp_ts <- cv_data_ts$response
#generate dummy bariables for catigorical varibles(i.e. gender and region)
cati_var <- c('gender', 'region')
dummy_df <- model.matrix(response~., data=cv_data_tr[, match(cati_var, names(cv_data_tr))]
)
#scale the 4/5 folds data and save the mean and sd to apply on 1/5 fold scaling
conti_var <- c('age', 'lookback_days', grep('freq$', names(cv_data_tr), value=T))
mean_sd <- lapply(conti_var, function(v){
var <- cv_data_tr[, v]
var_scale <- scale(var)
mean <- mean(var)
sd <- sd(var)
return(c(mean=mean, sd=sd))
})
conti_scale_df <- scale(cv_data_tr[, match(conti_var, name(cv_data_cv))])
cv_data_tr_scale <- cbind(cv_data_tr[, -match(c(conti_var, cati_var),names(cv_data_cv))], dummy_df, conti_scale_df)
#num_pros <- Sys.getenv('NUMBER_OF_PROCESSORS')
sfInit(parallel=TRUE, cpus=num_pros, type='SOCK')
sfSource("D:\\Shire_project\\03_code\\Jie\\svm\\svm_subFunc_Dec03.R")
cat(file=traceFile, append=TRUE, 'iter:', iter, ' sfExport running!\n')
sfExport('cv_data_tr', 'crit', 'cv_data_ts'
, 'grid', 'iter', 'traceFile')
sfExport('createCurve', 'grid_search_1fold_svm')
#sfClusterEval(library("glmnet"))
sfClusterEval(library("e1071"))
sfClusterEval(library("ROCR"))
sfClusterEval(library("plyr"))
sfClusterEval(library("dplyr"))
cat(file=traceFile, append=TRUE, 'iter:', iter, ' parallele grid search running!\n')
ms_fromGrid <- sfClusterApplyLB(1:nrow(grid), grid_search_1fold_svm, cv_data_tr, cv_data_ts)
sfStop()
cat(file=traceFile, append=TRUE, 'iter:', iter, ' parallele grid search end!\n')
save(file='fullRecords.RData', ms_fromGrid)
# keep only the per-cell metric vectors; drop models/predictions
ms_list <- lapply(ms_fromGrid, function(X)X[[1]])
ms_df <- ldply(ms_list, quickdf)
cat(file=traceFile, append=TRUE, 'iter:', iter, ' run over for initial grid search!\n')
return(ms_df)
}
# Sequential (non-parallel) k-fold CV evaluation of one (cost, weight)
# grid row for the linear SVM: loop over the pre-built fold files, train,
# score the left-out fold, persist model + predictions, and return the
# fold-averaged metric.
#
# NOTE(review): free variables `grid`, `iter`, `traceFile`, `crit`,
# `k.folds`, `path_sepData` must be in scope. The inner loop shadows the
# outer `start1` with `start` but then logs elapsed time against
# `start1` — the per-fold "time used" therefore accumulates across folds;
# confirm that is intended.
grid_search_cv_svm <- function(r){
cost <- grid[r, 1]
wt <- grid[r, 2]
# class weights: penalise misclassifying the rare 'H' class by `wt`
wts <- list( c("NH"=1,"H"=wt) )
cat(file = traceFile, append=T, 'iter-', iter , ' wt-', wt, ' cost-', cost, ' start!\n')
start1 <- proc.time()
ms_onCV <- lapply(1:k.folds, function(i){
#load the cv data for cv i
# load() brings `cv_trTs_norm` (list with $tr / $ts) into scope
load(paste(path_sepData, '\\dataset_cv', i, '.RData', sep=''))
cv_data_tr <- cv_trTs_norm$tr #[1] 127032 242
cv_data_tr$response <- as.factor(ifelse(cv_data_tr$response==1, 'H', 'NH'))
cv_data_ts <- cv_trTs_norm$ts #[1] 31356 242
cv_resp_ts <- cv_data_ts$response
cv_x_ts <- cv_data_ts[, -match('response', names(cv_data_ts))]
cat(file=traceFile, append=T, 'iter-', iter , ' wt-', wt, ' cost-', cost, ' Fold-', i, ' svm training on subtrain start! \n')
start <- proc.time()
svmFit <- svm(
response~., data=cv_data_tr
#TrainData, TrainClasses
, cost=cost, class.weights = wts[[1]]
, type="C-classification", kernel='linear'
, scale=F
)
save(file=paste('svmModel_cost_', cost, '_wt_', wt, '_fold_', i, '.RData', sep=''), svmFit)
cat(file=traceFile, append=T, 'iter-', iter , ' wt-', wt, ' cost-', cost, ' Fold-', i, ' svm training on subtrain end! '
, 'time used-', (proc.time()-start1)[3]/60, 'min! '
, 'ending time-', date() ,'\n'
)
pred <- predict(svmFit, cv_x_ts , scale=F,decision.values = TRUE)
cat(file=traceFile, append=T,'iter-', iter , ' wt-', wt, ' cost-', cost, ' Fold-', i, ' pred on left out fold end! '
, 'time used-', (proc.time()-start1)[3]/60, 'min! '
, 'ending time-', date() ,'\n'
)
# decision values serve as the continuous scores for the PR metric
predscore <- attr(pred, "decision.values")
pred_model <- list(model_onSubTr=svmFit, resp_pred_ts = data.frame(resp=cv_resp_ts, pred=predscore))
save(file=paste('pred_wt(', wt,')cost(', cost, ')Fold',i, '.RData', sep=''), pred_model)
ms <- createCurve_v2(resp = cv_resp_ts, pred=predscore, recall_tar = crit)
grid_ms = c(cost=cost, weight=wt, fold=i, ms=ms)
cat(file=traceFile, append=T,'iter-', iter , ' wt-', wt, ' cost-', cost, ' Fold-', i, ' ms-', ms, '\n')
return( grid_ms)
})
# stack per-fold metric vectors and average over folds
ms_onCV_df <- ldply(ms_onCV, quickdf)
ms_avg <- aggregate(ms_onCV_df$ms, by=list(cost=ms_onCV_df$cost, wt=ms_onCV_df$weight), mean, na.rm=T)
names(ms_avg) <- c('cost', 'wt', 'ms')
cat(file=traceFile, append=T,'iter-', iter , ' wt-', wt, ' cost-', cost, ' ms_avg-', ms_avg[1, 3], '\n')
return(ms_avg)
}
# Worker: for CV fold `i`, fit a class-weighted SVM (linear or RBF,
# selected by the free variable `kernel`) on the fold's training split,
# score the held-out split, save the model + predictions to
# 'pred_wt(..)cost(..)gm(..)Fold<i>.RData', and return the fold's
# (parameters, metric) vector.
#
# NOTE(review): `path_sepData`, `iter`, `wt`, `cost`, `gamma`, `wts`,
# `kernel`, `clsMethod`, `traceFile`, `crit` are free variables that must
# exist in the worker environment (sfExport-ed by grid_search_cv_svm_par).
# NOTE(review): this calls createCurve() although the caller only
# sfExports createCurve_v2; presumably it resolves because sfSource()
# loads the whole file on each worker — confirm.
par_onCV <- function(i){
#load the cv data for cv i
# load() brings `cv_trTs_norm` (list with $tr / $ts) into scope
load(paste(path_sepData, '\\dataset_cv', i, '.RData', sep=''))
cv_data_tr <- cv_trTs_norm$tr #[1] 127032 242
cv_data_tr$response <- as.factor(ifelse(cv_data_tr$response==1, 'H', 'NH'))
cv_data_ts <- cv_trTs_norm$ts #[1] 31356 242
cv_resp_ts <- cv_data_ts$response
cv_x_ts <- cv_data_ts[, -match('response', names(cv_data_ts))]
cat(file=traceFile, append=T, 'iter-', iter , ' wt-', wt, ' cost-', cost, ' gamma-', gamma,' Fold-', i, ' svm training on subtrain start! \n')
start1 <- proc.time()
# scale=F here and in predict(): data are presumably normalised upstream
# (the fold files are written elsewhere) — confirm.
if(kernel=='lin'){
svmFit <- svm(
response~., data=cv_data_tr
#TrainData, TrainClasses
, cost=cost, class.weights = wts[[1]]
, type=clsMethod, kernel='linear'
, scale=F
)
}else if(kernel=='rbf'){
svmFit <- svm(
response~., data=cv_data_tr
#TrainData, TrainClasses
, cost=cost, class.weights = wts[[1]]
, gamma=gamma
, type=clsMethod, kernel='radial'
, scale=F
)
}
#load(file=paste('svmModel_cost_', cost, '_wt_', wt, '_fold_', i, '.RData', sep=''))
cat(file=traceFile, append=T, 'iter-', iter , ' wt-', wt, ' cost-', cost,' gamma-', gamma, ' Fold-', i, ' svm training on subtrain end! '
, 'time used-', (proc.time()-start1)[3]/60, 'min! '
, 'ending time-', date() ,'\n'
)
pred <- predict(svmFit, cv_x_ts , scale=F,decision.values = TRUE)
cat(file=traceFile, append=T,'iter-', iter , ' wt-', wt, ' cost-', cost, ' gamma-', gamma, ' Fold-', i, ' pred on left out fold end! '
, 'time used-', (proc.time()-start1)[3]/60, 'min! '
, 'ending time-', date() ,'\n'
)
# decision values (signed distance to the separating hyperplane) are
# used as the continuous scores for the PR-curve metric
predscore <- attr(pred, "decision.values")
pred_model <- list(model_onSubTr=svmFit, resp_pred_ts = data.frame(resp=cv_resp_ts, pred=predscore))
save(file=paste('pred_wt(', wt,')cost(', cost, ')gm(', gamma, ')Fold',i, '.RData', sep=''), pred_model)
ms <- createCurve(resp = cv_resp_ts, pred=predscore, recall_tar = crit)
# gamma is only meaningful (numeric) for the RBF kernel, so the result
# vector has a different shape per kernel
if(kernel=='lin'){
grid_ms = c(cost=cost, weight=wt, fold=i, ms=ms)
}else if(kernel=='rbf'){
grid_ms = c(cost=cost, gamma=gamma, weight=wt, fold=i, ms=ms)
}
cat(file=traceFile, append=T,'iter-', iter , ' wt-', wt, ' cost-', cost,' gamma-', gamma, ' Fold-', i, ' ms-', ms, '\n')
return( grid_ms)
}
# Evaluate one row `r` of the hyper-parameter `grid` by k-fold CV, with
# the folds themselves run in parallel (nested snowfall cluster, one CPU
# per fold, each executing par_onCV). Returns the fold-averaged metric
# per parameter combination.
#
# NOTE(review): free variables `grid`, `kernel`, `iter`, `traceFile`,
# `crit`, `k.folds`, `path_sepData`, `ONE`, `clsMethod` must be in scope.
# NOTE(review): grid column layout differs by kernel — lin: (cost, wt);
# rbf: (cost, gamma, wt). For 'lin' gamma is the string 'NA' (log only).
grid_search_cv_svm_par <- function(r){
if(kernel=='lin'){
cost <- grid[r, 1]
wt <- grid[r, 2]
gamma <- 'NA'
}else if(kernel=='rbf'){
cost <- grid[r, 1]
gamma <- grid[r, 2]
wt <- grid[r, 3]
}
# class weights: penalise misclassifying the rare 'H' class `wt` times more
wts <- list( c("NH"=1,"H"=wt) )
cat(file = traceFile, append=T, 'iter-', iter , ' wt-', wt, ' cost-', cost, ' gamma-', gamma, ' start!\n')
start1 <- proc.time()
#num_pros <- Sys.getenv('NUMBER_OF_PROCESSORS')
# one worker per CV fold
sfInit(parallel=TRUE, cpus=k.folds, type='SOCK')
sfSource("D:\\Shire_project\\03_code\\Jie\\svm\\RBF\\svm_subFunc_Dec03_normByRow_rbf.R")
cat(file=traceFile, append=TRUE, 'iter:', iter, ' cost-', cost,' gamma-', gamma, ' wt-', wt, ' sfExport running!\n')
sfExport('grid', 'cost', 'wt', 'wts', 'iter', 'traceFile', 'crit', 'k.folds', 'path_sepData'
, 'ONE', 'kernel', 'clsMethod', 'gamma')
# NOTE(review): par_onCV calls createCurve(), which is not exported here;
# it is only available on workers via the sfSource() above — confirm.
sfExport('createCurve_v2', 'par_onCV')
#sfClusterEval(library("glmnet"))
sfClusterEval(library("e1071"))
sfClusterEval(library("ROCR"))
sfClusterEval(library("plyr"))
sfClusterEval(library("dplyr"))
cat(file=traceFile, append=TRUE, 'iter:', iter, ' cost-', cost, 'wt-', wt, ' gamma-', gamma, ' parallele onCV running start!\n')
ms_onCV <- sfClusterApplyLB(1:k.folds, par_onCV)
sfStop()
cat(file=traceFile, append=TRUE, 'iter:', iter, ' cost-', cost, 'wt-', wt,' gamma-', gamma, ' parallele onCV running end!', 'usedTime-', (proc.time()-start1)[3]/60, 'min\n')
# stack the per-fold metric vectors and average over folds
ms_onCV_df <- ldply(ms_onCV, quickdf)
if(kernel=='lin'){
ms_avg <- aggregate(ms_onCV_df$ms, by=list(cost=ms_onCV_df$cost, wt=ms_onCV_df$weight), mean, na.rm=T)
names(ms_avg) <- c('cost', 'wt', 'ms')
}else if(kernel=='rbf'){
ms_avg <- aggregate(ms_onCV_df$ms, by=list(cost=ms_onCV_df$cost, gamma=ms_onCV_df$gamma, wt=ms_onCV_df$weight), mean, na.rm=T)
names(ms_avg) <- c('cost', 'gamma', 'wt', 'ms')
}
cat(file=traceFile, append=T,'iter-', iter , ' wt-', wt, ' cost-', cost,' gamma-', gamma, ' ms_avg-', ms_avg[1, ncol(ms_avg)], '\n')
return(ms_avg)
}
# Full pipeline for one iteration: (1) parallel CV grid search over all rows
# of `grid` via grid_search_cv_svm_par, (2) pick the best parameter
# combination, (3) refit the SVM with those parameters on the full training
# set and score the held-out test set.
#
# Reads globals: `grid`, `iter`, `crit`, `k.folds`, `kernel`, `clsMethod`,
# `ONE`, `createCurve_v2`, `grid_search_cv_svm_par` -- NOTE(review): these
# must be defined by the calling script before use.
# Side effects: writes fullRecords.RData, svm_model_on_fullTrain.RData,
# pred_on_fullTest.RData and a Model&Pred_*.RData file to the working
# directory, and appends progress lines to traceFile.
#
# traceFile:    log file appended to throughout.
# path_sepData: directory containing trTs_rn.RData (a list `trTs_norm` with
#               elements $tr and $ts -- see the load() below).
# num_pros:     number of snowfall worker processes.
# Returns list(resp_pred = data.frame(resp, pred),
#              ms = named vector with optimal parameters + test measure).
get_optimum_model_iter_withCV <- function(traceFile, path_sepData, num_pros){
#num_pros <- Sys.getenv('NUMBER_OF_PROCESSORS')
# --- (1) parallel grid search: one task per row of `grid` ---
sfInit(parallel=TRUE, cpus=num_pros, type='SOCK')
sfSource("D:\\Shire_project\\03_code\\Jie\\svm\\RBF\\svm_subFunc_Dec03_normByRow_rbf.R")
cat(file=traceFile, append=TRUE, 'iter:', iter, ' sfExport running!\n')
sfExport('grid', 'iter', 'traceFile', 'crit', 'k.folds', 'path_sepData'
, 'ONE', 'kernel', 'clsMethod')
sfExport('createCurve_v2', 'grid_search_cv_svm_par')
#sfClusterEval(library("glmnet"))
sfClusterEval(library("e1071"))
sfClusterEval(library("ROCR"))
sfClusterEval(library("plyr"))
sfClusterEval(library("dplyr"))
sfClusterEval(library('snowfall'))
cat(file=traceFile, append=TRUE, 'iter:', iter, ' parallele grid search running!\n')
ms_allGrid <- sfClusterApplyLB(1:nrow(grid), grid_search_cv_svm_par)
sfStop()
cat(file=traceFile, append=TRUE, 'iter:', iter, ' parallele grid search end!\n')
save(file='fullRecords.RData', ms_allGrid)
ms_allGrid <- ldply(ms_allGrid, quickdf)
#choose the best parameter
# All rows achieving the maximal ms; ties broken by random draw below.
opt_lines <- ms_allGrid[ms_allGrid$ms==max(ms_allGrid$ms, na.rm = T),]
# rep(..., 2) guards against sample()'s scalar behaviour when only one row
# ties (sample(n, 1) would otherwise draw from 1:n).
opt_line <- opt_lines[sample(rep(1:nrow(opt_lines), 2), 1), ]
opt_cost <- opt_line$cost
opt_wt <- opt_line$wt
max_ms <- opt_line$ms
if(kernel=='rbf'){
opt_gamma <- opt_line$gamma
}else if(kernel=='lin'){
opt_gamma <- 'NA'
}
cat(file=traceFile, append=TRUE, 'iter:', iter, ' run over for all grid search!\n'
,'opt_cost-',opt_cost, ' opt_gamma-',opt_gamma , ' opt_wt-', opt_wt, ' max_ms-', max_ms, '\n'
)
#applythe optimal parameter on the test data
cat(file=traceFile, append=T, 'iter-', iter , ' opt_wt-', opt_wt
, ' opt_cost-', opt_cost, ' opt_gamma-',opt_gamma ,' svm training on subtrain start! \n')
start1 <- proc.time()
# --- (3) refit on the full training set, score the held-out test set ---
load(paste(path_sepData, '\\trTs_rn.RData', sep=''))
train_data_iter <- trTs_norm$tr
test_data_iter <- trTs_norm$ts
rm(trTs_norm)
# Recode numeric response to class labels used by class.weights below.
train_data_iter$response <- ifelse(train_data_iter$response==1, 'H', 'NH')
resp_ts2 <- test_data_iter$response
x_ts2 <- test_data_iter[, -match('response',names(test_data_iter))]
rm(test_data_iter)
wts <- list( c("NH"=1,"H"=opt_wt) )
if(kernel=='lin'){
svmFit <- svm(
response~., data=train_data_iter
#TrainData, TrainClasses
, cost=opt_cost, class.weights = wts[[1]]
, type=clsMethod, kernel='linear'
, scale=F
)
}else if(kernel=='rbf'){
svmFit <- svm(
response~., data=train_data_iter
#TrainData, TrainClasses
, cost=opt_cost, class.weights = wts[[1]]
, gamma=opt_gamma
, type=clsMethod, kernel='radial'
, scale=F
)
}
save(file='svm_model_on_fullTrain.RData', svmFit)
rm(train_data_iter)
cat(file=traceFile, append=T, 'iter-', iter , ' opt_wt-', opt_wt
, ' opt_cost-', opt_cost, ' opt_gamma-',opt_gamma ,' svm training on training end! '
, 'time used-', (proc.time()-start1)[3]/60, 'min! '
, 'ending time-', date() ,'\n'
)
pred <- predict(svmFit, x_ts2 , scale=F,decision.values = TRUE)
save(file='pred_on_fullTest.RData', pred)
rm(x_ts2)
cat(file=traceFile, append=T,'iter-', iter , ' opt_wt-', opt_wt
, ' opt_cost-', opt_cost, ' opt_gamma-',opt_gamma ,' pred on left out test end! '
, 'time used-', (proc.time()-start1)[3]/60, 'min! '
, 'ending time-', date() ,'\n'
)
# Decision values (signed distances) are used as the prediction score.
predscore <- attr(pred, "decision.values")
pred_model <- list(model_onFullTr=svmFit
, resp_pred_ts = data.frame(resp=resp_ts2,pred=predscore))
save(file=paste('Model&Pred_optWt(', opt_wt,')optCost(', opt_cost, ')optGm(', opt_gamma,').RData', sep='')
, pred_model)
ms <- createCurve_v2(resp = resp_ts2, pred=predscore, recall_tar = crit)
if(kernel=='lin'){
grid_ms = c(opt_cost=opt_cost, opt_wt=opt_wt, max_ms=ms)
}else if(kernel=='rbf'){
grid_ms = c(opt_cost=opt_cost, opt_gamma=opt_gamma, opt_wt=opt_wt, max_ms=ms)
}
result_list <- list(resp_pred =data.frame(resp=resp_ts2,pred=predscore)
, ms=grid_ms)
return(result_list)
}
# (Temporary/linear-kernel variant) Grid-search SVM hyper-parameters with
# parallel CV, then refit a linear C-classification SVM with the optimal
# cost/class-weight on the full training set and score the held-out test set.
#
# Reads globals exported to snowfall workers: `grid`, `iter`, `crit`,
# `k.folds`, `grid_search_cv_svm_temp`, `createCurve` -- NOTE(review): the
# calling script must define them.
# Side effects: writes fullRecords.RData and a Model&Pred_*.RData file, and
# appends progress lines to traceFile.
#
# traceFile:    log file appended to throughout.
# path_sepData: directory containing trTs_rn.RData (list `trTs_norm` with
#               elements $tr and $ts).
# num_pros:     number of snowfall worker processes.
# Returns list(resp_pred = data.frame(resp, pred),
#              ms = c(opt_cost, opt_wt, max_ms)).
get_optimum_model_iter_withCV_temp <- function(traceFile, path_sepData, num_pros){
  # --- parallel grid search over all rows of `grid` ---
  sfInit(parallel=TRUE, cpus=num_pros, type='SOCK')
  sfSource("D:\\Shire_project\\03_code\\Jie\\svm\\svm_subFunc_Dec03_normByRow.R")
  cat(file=traceFile, append=TRUE, 'iter:', iter, ' sfExport running!\n')
  sfExport('grid', 'iter', 'traceFile', 'crit', 'k.folds', 'path_sepData')
  sfExport('createCurve', 'grid_search_cv_svm_temp')
  sfClusterEval(library("e1071"))
  sfClusterEval(library("ROCR"))
  sfClusterEval(library("plyr"))
  sfClusterEval(library("dplyr"))
  cat(file=traceFile, append=TRUE, 'iter:', iter, ' parallele grid search running!\n')
  ms_allGrid <- sfClusterApplyLB(1:nrow(grid), grid_search_cv_svm_temp)
  sfStop()
  cat(file=traceFile, append=TRUE, 'iter:', iter, ' parallele grid search end!\n')
  save(file='fullRecords.RData', ms_allGrid)
  ms_allGrid <- ldply(ms_allGrid, quickdf)
  #choose the best parameter
  opt_lines <- ms_allGrid[ms_allGrid$ms==max(ms_allGrid$ms, na.rm = T),]
  # rep(..., 2) guards against sample()'s scalar behaviour when only one row
  # achieves the maximum (sample(n, 1) would otherwise draw from 1:n).
  opt_line <- opt_lines[sample(rep(1:nrow(opt_lines), 2), 1), ]
  opt_cost <- opt_line$cost
  opt_wt <- opt_line$wt
  max_ms <- opt_line$ms
  cat(file=traceFile, append=TRUE, 'iter:', iter, ' run over for all grid search!\n'
      ,'opt_cost-',opt_cost, ' opt_wt-', opt_wt, ' max_ms-', max_ms, '\n'
  )
  #applythe optimal parameter on the test data
  cat(file=traceFile, append=T, 'iter-', iter , ' opt_wt-', opt_wt
      , ' opt_cost-', opt_cost, ' svm training on subtrain start! \n')
  start1 <- proc.time()
  load(paste(path_sepData, '\\trTs_rn.RData', sep=''))
  train_data_iter <- trTs_norm$tr
  test_data_iter <- trTs_norm$ts
  rm(trTs_norm)
  # Recode numeric response to the class labels used by class.weights.
  train_data_iter$response <- ifelse(train_data_iter$response==1, 'H', 'NH')
  resp_ts2 <- test_data_iter$response
  x_ts2 <- test_data_iter[, -match('response',names(test_data_iter))]
  rm(test_data_iter)
  wts <- list( c("NH"=1,"H"=opt_wt) )
  svmFit <- svm(
    response~., data=train_data_iter
    , cost=opt_cost, class.weights = wts[[1]]
    , type="C-classification", kernel='linear'
    , scale=F
  )
  rm(train_data_iter)
  cat(file=traceFile, append=T, 'iter-', iter , ' opt_wt-', opt_wt
      , ' opt_cost-', opt_cost, ' svm training on training end! '
      , 'time used-', (proc.time()-start1)[3]/60, 'min! '
      , 'ending time-', date() ,'\n'
  )
  # FIX: original passed `sclae=T` (typo), silently ignored via `...` of
  # predict.svm; use scale=F for consistency with the sibling function.
  pred <- predict(svmFit, x_ts2 , scale=F, decision.values = TRUE)
  rm(x_ts2)
  cat(file=traceFile, append=T,'iter-', iter , ' opt_wt-', opt_wt
      , ' opt_cost-', opt_cost, ' pred on left out test end! '
      , 'time used-', (proc.time()-start1)[3]/60, 'min! '
      , 'ending time-', date() ,'\n'
  )
  # Decision values (signed distances) serve as the prediction score.
  predscore <- attr(pred, "decision.values")
  pred_model <- list(model_onFullTr=svmFit
                     , resp_pred_ts = data.frame(resp=resp_ts2, pred=predscore))
  save(file=paste('Model&Pred_optWt(', opt_wt,')optCost(', opt_cost, ').RData', sep='')
       , pred_model)
  ms <- createCurve(resp = resp_ts2, pred=predscore, recall_tar = crit)
  # Build the summary vector once and reuse it (original recomputed it).
  grid_ms <- c(opt_cost=opt_cost, opt_wt=opt_wt, max_ms=ms)
  result_list <- list(resp_pred =data.frame(resp=resp_ts2, pred=predscore)
                      , ms=grid_ms)
  return(result_list)
}
# Build the stacked train/test data for CV fold i of one iteration.
#
# haeIdx_iter / nonhaeIdx_iter: row indices (into the global data frames
#   dat_hae_1111_rf / dat_nonhae_1111_rf) in play for this iteration.
# foldid_hae_cv: list mapping fold number -> positions (within haeIdx_iter)
#   of the HAE rows held out in that fold.
# trainIdxIterIDNonHAE: indexed by HAE row index; each element's first column
#   holds the matched non-HAE row indices.
# i: fold number.
#
# Returns list(cv_data_lasso = training rows stacked above test rows (first
# column of the source frames dropped), tr_flag = logical vector marking
# which rows of cv_data_lasso belong to the training part).
#
# FIX: removed the dead locals idx_iter_cviTs / idx_iter_cviTr, which were
# computed but never used.
create_tr_ts_forCVi <- function(haeIdx_iter, nonhaeIdx_iter, foldid_hae_cv, trainIdxIterIDNonHAE, i){
  # Held-out HAE indices for fold i and their matched non-HAE indices.
  haeIdx_iter_cviTs <- haeIdx_iter[foldid_hae_cv[[i]]]
  nonhaeIdx_iter_cviTs <- unlist(lapply(trainIdxIterIDNonHAE[haeIdx_iter_cviTs], function(x) x[, 1]))
  # Everything else forms the fold's training part.
  haeIdx_iter_cviTr <- setdiff(haeIdx_iter, haeIdx_iter_cviTs)
  nonhaeIdx_iter_cviTr <- setdiff(nonhaeIdx_iter, nonhaeIdx_iter_cviTs)
  # Column 1 of the source frames is dropped -- presumably an id/response
  # column; confirm against the callers.
  HAE_cviTs <- dat_hae_1111_rf[haeIdx_iter_cviTs, -1]
  HAE_cviTr <- dat_hae_1111_rf[haeIdx_iter_cviTr, -1]
  NonHae_cviTs <- dat_nonhae_1111_rf[nonhaeIdx_iter_cviTs, -1]
  NonHae_cviTr <- dat_nonhae_1111_rf[nonhaeIdx_iter_cviTr, -1]
  cv_test_data_lasso <- as.data.frame(rbind(HAE_cviTs, NonHae_cviTs))
  cv_training_data_lasso <- as.data.frame(rbind(HAE_cviTr, NonHae_cviTr))
  # Training rows first, then test rows; tr_flag records the split.
  cv_data_lasso <- rbind(cv_training_data_lasso, cv_test_data_lasso)
  tr_flag <- c(rep(TRUE, nrow(cv_training_data_lasso)), rep(FALSE, nrow(cv_test_data_lasso)))
  return(list(cv_data_lasso=cv_data_lasso, tr_flag=tr_flag))
}
# caret-style summary function for two-class problems: detection count,
# AUPR, ROC AUC, sensitivity, specificity and positive predictive value.
#
# data:  data.frame with factor columns `obs` and `pred` (identical levels)
#        and one score/probability column per class level.
# lev:   class levels; lev[1] is treated as the event level ('H' below).
# model: unused; kept for caret's summaryFunction interface.
# Returns a named numeric vector c(DR, AUPR, ROC, Sens, Spec, PPV).
# NOTE(review): sensitivity/specificity/posPredValue are assumed to be
# available from caret in the calling environment.
get_auprSummary <- function (data, lev = NULL, model = NULL) {
  if (length(levels(data$obs)) > 2)
    stop(paste("Your outcome has", length(levels(data$obs)),
               "levels. The twoClassSummary() function isn't appropriate."))
  # Calls below are fully namespaced, so loading the namespace suffices
  # (avoids mutating the search path from inside a function).
  requireNamespace("pROC", quietly = TRUE)
  if (!all(levels(data[, "pred"]) == levels(data[, "obs"])))
    stop("levels of observed and predicted data do not match")
  rocObject <- try(pROC::roc(data$obs, data[, lev[1]]), silent = TRUE)
  rocAUC <- if (inherits(rocObject, "try-error")) NA else rocObject$auc
  requireNamespace("PRROC", quietly = TRUE)
  # pr.curve() wants 0/1 class weights; 'H' is taken as the positive class.
  obs_ori <- ifelse(data$obs == 'H', 1, 0)
  prObject <- try(PRROC::pr.curve(scores.class0 = data[, lev[1]],
                                  weights.class0 = obs_ori), silent = TRUE)
  prAUPR <- if (inherits(prObject, "try-error")) NA else prObject$auc.integral
  # Cell (obs level 2, pred level 1) of the confusion table -- presumably a
  # detection count; confirm the factor level order with the caller.
  DR <- table(data[, 'obs'], data[, 'pred'])[2, 1]
  out <- c(DR, prAUPR, rocAUC,
           sensitivity(data[, "pred"], data[, "obs"], lev[1]),
           specificity(data[, "pred"], data[, "obs"], lev[2]),
           posPredValue(data[, "pred"], data[, "obs"], lev[1], na.rm=T))
  names(out) <- c('DR', "AUPR", "ROC", "Sens", "Spec", "PPV")
  out
}
# Build a custom caret model spec (clone of "svmLinear2") whose tuning grid
# exposes both the SVM cost and a class weight, so caret can tune the two
# jointly when fitting weighted linear SVMs.
get_svmWtModel <- function(){
  # Start from caret's stock svmLinear2 definition and override pieces.
  spec <- getModelInfo(model='svmLinear2', regex=FALSE)[[1]]

  # Tunable parameters: cost and weights, both numeric.
  spec$parameters <- data.frame(parameter = c('cost', 'weights'),
                                class = rep('numeric',2),
                                label = c('cost', 'weights'))

  # Custom fit: translate the `weights` tuning value into e1071's
  # class.weights vector (first level of y gets the weight, second gets 1);
  # a weight of exactly 1 means "no class weighting".
  spec$fit <- function(x, y, wts, param, lev, last, weights, classProbs, ...){
    if(param$weights !=1){
      cls_wts <- c(param$weights,1)
      names(cls_wts) <- levels(y)
    } else {
      cls_wts <- NULL
    }
    svm(x = as.matrix(x), y = y,
        kernel = 'linear',
        cost = param$cost,
        class.weights = cls_wts,
        probability=classProbs, ...)
  }

  spec
}
|
\name{fitCBB}
\alias{fitCBB}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{fitCBB}
\description{Fitting a specific generalized Archimedean copula
}
\usage{
fitCBB(x, y, theta0, delta0, copulamodel = c("pCBB1", "pCBB2", "pCBB3", "pCBB4",
"pCBB5", "pCBB6", "pCBB7", "pCMax", "pCMin"), m, step, deltamin, thetamin,
test = c("wilcox.test", "t.test"), empcumulative = TRUE, cumulative1,
cumulative2, parameters1, parameters2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{ real vector }
\item{y}{ real vector }
\item{theta0}{ parameter in the model pCBBi (in variable copulamodel). For default, theta0 is obtained from fitlambdas }
\item{delta0}{ parameter in the model pCBBi (in variable copulamodel). For default, delta0 is obtained from fitlambdas}
\item{copulamodel}{specific model to be fitted; it must be one option from: pCBB1 (default), pCBB2, pCBB3, pCBB4, pCBB5, pCBB6, pCBB7, pCMax, pCMin
}
\item{m}{ integer positive number (default=15) }
\item{step}{ real positive number (default=0.01) }
\item{deltamin}{ minimum value admitted for delta's domain (default=epsilon-see details) }
\item{thetamin}{ minimum value admitted for theta's domain (default=epsilon-see details) }
\item{test}{ test used for fitting selection; it must be wilcox.test (default) or t.test }
\item{empcumulative}{ logical value, can be TRUE (default) or FALSE (see details) }
\item{cumulative1}{ marginal cumulative associated with x. Can be used pnorm, pbeta, pempirical,...(only used when empcumulative=FALSE)
}
\item{cumulative2}{ marginal cumulative associated with y. Can be used pnorm, pbeta, pempirical,...(only used when empcumulative=FALSE)
}
\item{parameters1}{ specific parameters for cumulative1's definition }
\item{parameters2}{ specific parameters for cumulative2's definition }
}
\details{The function constructs a neighbourhood around (theta0,delta0) for the family specified in \sQuote{copulamodel}, and using the test specified in \sQuote{test} it searches for the best (theta*,delta*) in the neighbourhood such that copulamodel(theta*,delta*,u,v) is close to the bivariate empirical copula from (x,y). Where (u,v)=(cumulative1(x),cumulative2(y)).
m and step control the neighbourhood' definition.
deltamin and thetamin depend on the model worked. For default, we have, pCBB1: deltamin=1, thetamin=0.05; pCBB2: deltamin=0.05, thetamin = 0.05; pCBB3: deltamin=1, thetamin=0.05; pCBB4: deltamin=0.05, thetamin=0.05; pCBB5: deltamin=0.05, thetamin=1; pCBB6: deltamin=1, thetamin=1; pCBB7: deltamin = 0.05, thetamin = 1.
If empcumulative=TRUE like default, the algorithm uses for uniformization, empirical cumulative from x for x and empirical cumulative from y for y.
If empcumulative=FALSE, we need to put an specific cumulative1 and an specific cumulative2. If necessary, parameters1 contains the special parameter(s) for cumulative1 and parameters2 contains the special parameter(s) for cumulative2.
}
\value{
\item{Empirical}{empirical copula from (x,y)}
\item{Copula}{best copulamodel evaluated in (u,v)=(cumulative1(x),cumulative2(y))}
\item{fit}{performance from the best copulamodel in the neighbourhood. Result:
p.value in fit[1], delta in fit[2], theta in fit[3]}
\item{thetai}{theta's vector constructed in the neighbourhood}
\item{deltaj}{delta's vector constructed in the neighbourhood}
\item{pthetaideltaj}{p value matrix from each combination. The position (i,j) represents the p value from \sQuote{test} in thetai(i),deltaj(j) for copulamodel.}
}
\references{Veronica A. Gonzalez-Lopez and Nelson I. Tanaka. \sQuote{Bi-variate Data Modeling Through Generalized Archimedean Copula} RT-MAE 2003-03.
Harry Joe. \sQuote{Multivariate Models and Dependence Concepts} Monogra. Stat. & Appl. Probab. 73. Chapman and Hall (1997) }
\author{Veronica Andrea Gonzalez-Lopez }
\seealso{ \code{\link{fitlambdas}}, \code{\link{OptimCBB}} ~~~ }
\examples{#x<-rnorm(100)
#y<-x/10+rnorm(100)
#M<-fitCBB(x,y) # default fitting
#default: thetas0 and delta0 from fitlambdas function, m=15, step=0.01,
#copulamodel="pCBB1", test="wilcox.test", empcumulative=TRUE.
#
#M<-fitCBB(x,y,theta0=1.1,delta0=0.8,copulamodel="pCBB5",m=20,step=0.5,deltamin=0.1,thetamin=1.1,
#test="w",empcumulative=FALSE,cumulative1=pnorm,cumulative2=pnorm)
#
#x<-rnorm(100)
#y<-x/100+rnorm(100,5,2)
#M<-fitCBB(x,y,theta0=1.1,delta0=0.8,copulamodel="pCBB7",m=20,step=0.5,deltamin=0.1,thetamin=1.1,
#test="t",empcumulative=FALSE,cumulative1=pnorm,cumulative2=pnorm,parameters2=c(5,2))
}
\keyword{multivariate}
| /man/fitCBB.Rd | no_license | cran/fgac | R | false | false | 4,597 | rd | \name{fitCBB}
\alias{fitCBB}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{fitCBB}
\description{Fitting a specific generalized Archimedean copula
}
\usage{
fitCBB(x, y, theta0, delta0, copulamodel = c("pCBB1", "pCBB2", "pCBB3", "pCBB4",
"pCBB5", "pCBB6", "pCBB7", "pCMax", "pCMin"), m, step, deltamin, thetamin,
test = c("wilcox.test", "t.test"), empcumulative = TRUE, cumulative1,
cumulative2, parameters1, parameters2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{ real vector }
\item{y}{ real vector }
\item{theta0}{ parameter in the model pCBBi (in variable copulamodel). For default, theta0 is obtained from fitlambdas }
\item{delta0}{ parameter in the model pCBBi (in variable copulamodel). For default, delta0 is obtained from fitlambdas}
\item{copulamodel}{specific model that we need to fit, it need to be one option from: pCBB1 (default), pCBB2, pCBB3, pCBB4, pCBB5, pCBB6, pCBB7, pCMax, pCMin
}
\item{m}{ integer positive number (default=15) }
\item{step}{ real positive number (default=0.01) }
\item{deltamin}{ minimum value admitted for delta's domain (default=epsilon-see details) }
\item{thetamin}{ minimum value admitted for theta's domain (default=epsilon-see details) }
\item{test}{ test used for fitting selection, it need to be wilcox.test(default) or t.test }
\item{empcumulative}{ logical value, can be TRUE (default) or FALSE (see details) }
\item{cumulative1}{ marginal cumulative associated with x. Can be used pnorm, pbeta, pempirical,...(only used when empcumulative=FALSE)
}
\item{cumulative2}{ marginal cumulative associated with y. Can be used pnorm, pbeta, pempirical,...(only used when empcumulative=FALSE)
}
\item{parameters1}{ specific parameters for cumulative1's definition }
\item{parameters2}{ specific parameters for cumulative2's definition }
}
\details{The function constructs a neighbourhood around (theta0,delta0) for family specified in \sQuote{copulamodel} , and using the test specified in \sQuote{test} the function search the best (theta*,delta*) in the neighbourhood such that copulamodel(theta*,delta*,u,v) is close to the bivariate empirical copula from (x,y). Where (u,v)=(cumulative1(x),cumulative2(y)).
m and step control the neighbourhood' definition.
deltamin and thetamin depend on the model worked. For default, we have, pCBB1: deltamin=1, thetamin=0.05; pCBB2: deltamin=0.05, thetamin = 0.05; pCBB3: deltamin=1, thetamin=0.05; pCBB4: deltamin=0.05, thetamin=0.05; pCBB5: deltamin=0.05, thetamin=1; pCBB6: deltamin=1, thetamin=1; pCBB7: deltamin = 0.05, thetamin = 1.
If empcumulative=TRUE like default, the algorithm uses for uniformization, empirical cumulative from x for x and empirical cumulative from y for y.
If empcumulative=FALSE, we need to put an specific cumulative1 and an specific cumulative2. If necessary, parameters1 contains the special parameter(s) for cumulative1 and parameters2 contains the special parameter(s) for cumulative2.
}
\value{
\item{Empirical}{empirical copula from (x,y)}
\item{Copula}{best copulamodel evaluated in (u,v)=(cumulative1(x),cumulative2(y))}
\item{fit}{performance from the best copulamodel in the neighbourhood. Result:
p.value in fit[1], delta in fit[2], theta in fit[3]}
\item{thetai}{theta's vector constructed in the neighbourhood}
\item{deltaj}{delta's vector constructed in the neighbourhood}
\item{pthetaideltaj}{p value matrix from each combination. The position (i,j) represents the p value from \sQuote{test} in thetai(i),deltaj(j) for copulamodel.}
}
\references{Veronica A. Gonzalez-Lopez and Nelson I. Tanaka. \sQuote{Bi-variate Data Modeling Through Generalized Archimedean Copula} RT-MAE 2003-03.
Harry Joe. \sQuote{Multivariate Models and Dependence Concepts} Monogra. Stat. & Appl. Probab. 73. Chapman and Hall (1997) }
\author{Veronica Andrea Gonzalez-Lopez }
\seealso{ \code{\link{fitlambdas}}, \code{\link{OptimCBB}} ~~~ }
\examples{#x<-rnorm(100)
#y<-x/10+rnorm(100)
#M<-fitCBB(x,y) # default fitting
#default: thetas0 and delta0 from fitlambdas function, m=15, step=0.01,
#copulamodel="pCBB1", test="wilcox.test", empcumulative=TRUE.
#
#M<-fitCBB(x,y,theta0=1.1,delta0=0.8,copulamodel="pCBB5",m=20,step=0.5,deltamin=0.1,thetamin=1.1,
#test="w",empcumulative=FALSE,cumulative1=pnorm,cumulative2=pnorm)
#
#x<-rnorm(100)
#y<-x/100+rnorm(100,5,2)
#M<-fitCBB(x,y,theta0=1.1,delta0=0.8,copulamodel="pCBB7",m=20,step=0.5,deltamin=0.1,thetamin=1.1,
#test="t",empcumulative=FALSE,cumulative1=pnorm,cumulative2=pnorm,parameters2=c(5,2))
}
\keyword{multivariate}
|
#' naniar
#'
#' naniar is a package to make it easier to summarise and handle missing values
#' in R. It strives to do this in a way that is as consistent with tidyverse
#' principles as possible. The work is fully discussed at Tierney & Cook (2023)
#' <doi:10.18637/jss.v105.i07>.
#'
#' @name naniar
#' @docType package
#' @seealso [add_any_miss()] [add_label_missings()] [add_label_shadow()] [add_miss_cluster()] [add_n_miss()] [add_prop_miss()] [add_shadow()] [add_shadow_shift()] [as_shadow()] [bind_shadow()] [cast_shadow()] [cast_shadow_shift()] [cast_shadow_shift_label()] [draw_key_missing_point()] [gather_shadow()] [geom_miss_point()] [gg_miss_case()] [gg_miss_case_cumsum()] [gg_miss_fct()] [gg_miss_span()] [gg_miss_var()] [gg_miss_var_cumsum()] [gg_miss_which()] [label_miss_1d()] [label_miss_2d()] [label_missings()] [pct_miss_case()] [prop_miss_case()] [pct_miss_var()] [prop_miss_var()] [pct_complete_case()] [prop_complete_case()] [pct_complete_var()] [prop_complete_var()] [miss_prop_summary()] [miss_case_summary()] [miss_case_table()] [miss_summary()] [miss_var_prop()] [miss_var_run()] [miss_var_span()] [miss_var_summary()] [miss_var_table()] [n_complete()] [n_complete_row()] [n_miss()] [n_miss_row()] [pct_complete()] [pct_miss()] [prop_complete()] [prop_complete_row()] [prop_miss()] [prop_miss_row()] [replace_to_na()] [replace_with_na()] [replace_with_na_all()] [replace_with_na_at()] [replace_with_na_if()] [shadow_shift()] [stat_miss_point()] [vis_miss()] [where_na()]
#' @import ggplot2
#' @import rlang
NULL
# Re-export: expose visdat::vis_miss() to naniar users without requiring an
# explicit library(visdat).
#' @importFrom stats median
#' @importFrom visdat vis_miss
#' @export
visdat::vis_miss
# Register names used via non-standard evaluation so `R CMD check` does not
# flag them as undefined global variables.
# FIX: removed the duplicate entries "n_miss" and "n_miss_in_case".
if(getRversion() >= "2.15.1") utils::globalVariables(c("."))
globalVariables(
  c(
    "median",
    "variable_NA",
    ".temp",
    ".temp_label",
    "rows",
    "..missing..",
    "n_miss",
    "case",
    "variable",
    "value",
    "span_counter",
    "n",
    "n_in_span",
    "nheight",
    "pct_miss",
    "n_miss_in_case",
    "values",
    "n_miss_in_var",
    "n_vars",
    "span_every",
    "n_miss_cumsum",
    "as.formula",
    "complete.cases"
  )
)
| /R/naniar-package.R | no_license | cran/naniar | R | false | false | 2,138 | r | #' naniar
#'
#' naniar is a package to make it easier to summarise and handle missing values
#' in R. It strives to do this in a way that is as consistent with tidyverse
#' principles as possible. The work is fully discussed at Tierney & Cook (2023)
#' <doi:10.18637/jss.v105.i07>.
#'
#' @name naniar
#' @docType package
#' @seealso [add_any_miss()] [add_label_missings()] [add_label_shadow()] [add_miss_cluster()] [add_n_miss()] [add_prop_miss()] [add_shadow()] [add_shadow_shift()] [as_shadow()] [bind_shadow()] [cast_shadow()] [cast_shadow_shift()] [cast_shadow_shift_label()] [draw_key_missing_point()] [gather_shadow()] [geom_miss_point()] [gg_miss_case()] [gg_miss_case_cumsum()] [gg_miss_fct()] [gg_miss_span()] [gg_miss_var()] [gg_miss_var_cumsum()] [gg_miss_which()] [label_miss_1d()] [label_miss_2d()] [label_missings()] [pct_miss_case()] [prop_miss_case()] [pct_miss_var()] [prop_miss_var()] [pct_complete_case()] [prop_complete_case()] [pct_complete_var()] [prop_complete_var()] [miss_prop_summary()] [miss_case_summary()] [miss_case_table()] [miss_summary()] [miss_var_prop()] [miss_var_run()] [miss_var_span()] [miss_var_summary()] [miss_var_table()] [n_complete()] [n_complete_row()] [n_miss()] [n_miss_row()] [pct_complete()] [pct_miss()] [prop_complete()] [prop_complete_row()] [prop_miss()] [prop_miss_row()] [replace_to_na()] [replace_with_na()] [replace_with_na_all()] [replace_with_na_at()] [replace_with_na_if()] [shadow_shift()] [stat_miss_point()] [vis_miss()] [where_na()]
#' @import ggplot2
#' @import rlang
NULL
#' @importFrom stats median
#' @importFrom visdat vis_miss
#' @export
visdat::vis_miss
# Register names used via non-standard evaluation so `R CMD check` does not
# flag them as undefined global variables.
# FIX: removed the duplicate entries "n_miss" and "n_miss_in_case".
if(getRversion() >= "2.15.1") utils::globalVariables(c("."))
globalVariables(
  c(
    "median",
    "variable_NA",
    ".temp",
    ".temp_label",
    "rows",
    "..missing..",
    "n_miss",
    "case",
    "variable",
    "value",
    "span_counter",
    "n",
    "n_in_span",
    "nheight",
    "pct_miss",
    "n_miss_in_case",
    "values",
    "n_miss_in_var",
    "n_vars",
    "span_every",
    "n_miss_cumsum",
    "as.formula",
    "complete.cases"
  )
)
|
# Statistics exercise: evaluate a few closed-form expressions (the constant
# 7 appears as the scale throughout) and print the results.
n1 <- 1 - exp(-5.2 / 7)
n2 <- -log(0.89) * 7
n3 <- 1 - exp(-8.7 / 7) - (1 - exp(-6.7 / 7))
# Piecewise expression: (2/7) * u * exp(-u^2 / 7) for positive u, else 0.
n4 <- function(u) {
  if (u > 0) {
    (2 / 7) * u * exp(-(u^2) / 7)
  } else {
    0
  }
}
print(n1)
print(n2)
print(n3)
print(n4(1)) | /R/2019-05-02 Statistica exercise.r | no_license | MarcoDiFrancesco/LittleProjects | R | false | false | 177 | r | n1 <- 1-exp(-5.2/7)
n2 <- -log(0.89)*7
n3 <- 1-exp(-8.7/7)-(1-exp(-6.7/7))
n4 <- function(u) {if(u>0) {(2/7)*u*exp(-(u^2)/7)} else 0}
print(n1)
print(n2)
print(n3)
print(n4(1)) |
# MIT License
#
# Copyright (c) 2018 Diviyan Kalainathan
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
library(CAM)
# Parent selection for variable k via significance of GAM smooth terms.
#
# Fits a GAM of X[, k] on all other columns (CAM:::train_gam) and marks as
# candidate parents the predictors whose smooth-term p-value is below
# pars$cutOffPVal.
#
# X:      data matrix / data.frame (variables in columns).
# pars:   list with cutOffPVal (significance threshold) and numBasisFcts
#         (forwarded to train_gam).
# output: if TRUE, print the vector of p-values.
# k:      column index of the response variable.
# Returns a logical vector of length ncol(X) (TRUE = keep as parent);
# an empty list when X has a single column.
#
# FIX: removed a duplicated `pValVec <- ...` statement and the unused
# `result` local from the original.
selGam <- function(X, pars = list(cutOffPVal = 0.001, numBasisFcts = 10), output = FALSE, k) {
  p <- dim(as.matrix(X))
  if (p[2] > 1) {
    selVec <- rep(FALSE, p[2])
    mod_gam <- CAM:::train_gam(X[, -k], as.matrix(X[, k]), pars)
    pValVec <- summary.gam(mod_gam$model)$s.pv
    if (output) {
      cat("vector of p-values:", pValVec, "\n")
    }
    # Sanity check: one p-value per candidate predictor.
    if (length(pValVec) != length(selVec[-k])) {
      show("This should never happen (function selGam).")
    }
    selVec[-k] <- (pValVec < pars$cutOffPVal)
  } else {
    selVec <- list()
  }
  return(selVec)
}
# for PNS (preliminary neighbourhood selection)
# Parent selection for variable k via gradient boosting
# (CAM:::train_GAMboost). A predictor becomes a candidate parent when it was
# selected in more than pars$atLeastThatMuchSelected of the boosting steps;
# at most pars$atMostThatManyNeighbors predictors are kept.
#
# X:      data matrix / data.frame (variables in columns).
# pars:   list(atLeastThatMuchSelected, atMostThatManyNeighbors).
# output: verbose tracing.
# k:      column index of the response variable.
# Returns a logical selection vector of length ncol(X); an empty list when
# X has a single column.
#
# FIX: hoisted the repeated `xselect()` accessor calls out of the loop,
# replaced the 1:length(cc) footgun with seq_along(cc), and dropped the
# unused `result` local.
selGamBoost <- function(X, pars = list(atLeastThatMuchSelected = 0.02, atMostThatManyNeighbors = 10), output = FALSE, k) {
  if (output) {
    cat("Performing variable selection for variable", k, ": \n")
  }
  p <- dim(as.matrix(X))
  if (p[2] > 1) {
    selVec <- rep(FALSE, p[2])
    modfitGam <- CAM:::train_GAMboost(X[, -k], X[, k], pars)
    # Indices of the predictors picked at each boosting step.
    selected_steps <- modfitGam$model$xselect()
    cc <- unique(selected_steps)
    if (output) {
      cat("The following variables \n")
      show(cc)
    }
    # Fraction of boosting steps in which each candidate was selected.
    nstep <- length(selected_steps)
    howOftenSelected <- rep(NA, length(cc))
    for (i in seq_along(cc)) {
      howOftenSelected[i] <- sum(selected_steps == cc[i]) / nstep
    }
    if (output) {
      cat("... have been selected that many times: \n")
      show(howOftenSelected)
    }
    howOftenSelectedSorted <- sort(howOftenSelected, decreasing = TRUE)
    # Keep only frequently-selected candidates, capped at
    # atMostThatManyNeighbors of them.
    if (sum(howOftenSelected > pars$atLeastThatMuchSelected) > pars$atMostThatManyNeighbors) {
      cc <- cc[howOftenSelected > howOftenSelectedSorted[pars$atMostThatManyNeighbors + 1]]
    } else {
      cc <- cc[howOftenSelected > pars$atLeastThatMuchSelected]
    }
    if (output) {
      cat("We finally choose as possible parents: \n")
      show(cc)
      cat("\n")
    }
    # Map positions within X[, -k] back onto the full variable vector.
    tmp <- rep(FALSE, p[2] - 1)
    tmp[cc] <- TRUE
    selVec[-k] <- tmp
  } else {
    selVec <- list()
  }
  return(selVec)
}
# Prune a candidate DAG: for every node, re-test its incoming edges with
# `pruneMethod` and keep only the surviving parents.
#
# X:            data matrix (variables in columns).
# G:            adjacency matrix; G[i, j] == 1 means edge i -> j.
# output:       verbose tracing.
# pruneMethod:  function(X, pars, output, k) returning a logical vector that
#               marks which columns of its X argument survive as parents of
#               column k (defaults to selGam).
# pruneMethodPars: list forwarded to pruneMethod.
# intervention / interv_mat: when intervention is TRUE, rows where node i was
#               intervened on (interv_mat[, i] != 0) are dropped before
#               testing i's parents, so only observational rows are used.
# Returns the pruned adjacency matrix.
pruning <- function(X, G, output = FALSE, pruneMethod = selGam, pruneMethodPars = list(cutOffPVal = 0.001, numBasisFcts = 10), intervention = FALSE, interv_mat = FALSE) {
  p <- dim(G)[1]
  finalG <- matrix(0, p, p)
  for (i in seq_len(p)) {
    parents <- which(G[, i] == 1)
    lenpa <- length(parents)
    if (output) {
      cat("pruning variable:", i, "\n")
      cat("considered parents:", parents, "\n")
    }
    if (lenpa > 0) {
      # Use only the observational data of variable i.
      Xobs <- X
      if (intervention == TRUE) {
        obs <- which(interv_mat[, i] == 0)
        if (length(obs) > 0) {
          Xobs <- Xobs[obs, ]
        }
      }
      # Candidate parents first, the target variable last (column lenpa+1).
      Xtmp <- cbind(Xobs[, parents], Xobs[, i])
      selectedPar <- pruneMethod(Xtmp, k = lenpa + 1, pars = pruneMethodPars, output = output)
      finalParents <- parents[selectedPar]
      finalG[finalParents, i] <- 1
    }
  }
  return(finalG)
}
CAM_with_score <-
function(X, scoreName = "SEMGAM",
parsScore = list(numBasisFcts=10),
numCores = 1,
maxNumParents = min(dim(X)[2] - 1, round(dim(X)[1]/20)),
output = FALSE,
variableSel = FALSE,
variableSelMethod = selGamBoost,
variableSelMethodPars = list(atLeastThatMuchSelected = 0.02, atMostThatManyNeighbors = 10),
is_pruning = FALSE,
pruneMethod = selGam,
pruneMethodPars = list(cutOffPVal = 0.001, numBasisFcts=10),
intervData = FALSE,
intervMat = NA,
intervMatVal = NA,
X_val=NULL)
{
require(CAM)
if(output)
{
cat("number of cores:", numCores, "\n")
}
# We record the time consumption. They are shown if output == TRUE
timeCycle <- 0
timeUpdate <- 0
timeScoreMat <- 0
timeSel <- 0
timePrune <- 0
timeMax <- 0
# we record how the score develops
scoreVec <- integer(0)
scoreVec_val <- integer(0)
# and which edges are added
edgeList <- integer(0)
# this counter is only used if output = TRUE
counterUpdate <- 0
p <- dim(X)[2]
####
# STEP 1: variable selection
####
# A matrix selMat is constructed. Entry (i,j) being one means that i is a possible parent of j.
if(variableSel)
{
ptm <- proc.time()[3]
X2 <- X
if(numCores == 1)
{
selMat <- mapply(variableSelMethod,MoreArgs = list(X = X2, pars = variableSelMethodPars, output = output),1:p)
} else
{
selMat <- mcmapply(variableSelMethod,MoreArgs = list(X = X2, pars = variableSelMethodPars, output = output),1:p, mc.cores = numCores)
}
# The next line includes j as a possible parent of i if i is considered a possible parent of j
# selMat <- selMat | t(selMat)
cou <- 0
for(jk in 1:p)
{
cou <- cou + 2^{sum(selMat[,jk])}
}
if(output)
{
cat("Instead of p2^(p-1) -Sillander- ",p*2^(p-1) ," we have ", cou, "\n")
cat("Greedy, on the other hand, is computing ",sum(selMat) ," entries. \n")
}
timeSel <- timeSel + proc.time()[3] - ptm
} else
{
selMat <- matrix(TRUE, p,p)
}
if(variableSel & output)
{
if(output)
{
if(p<30)
{
cat("This is the matrix of possible parents after the first step.\n")
show(selMat)
}
cat("Object size of selmat: ", object.size(selMat), "\n")
}
}
####
# STEP 2: Include Edges
####
# compute score matrix
ptm <- proc.time()[3]
computeScoreMatTmp <- computeScoreMat_with_score(X, X_val, scoreName=scoreName, numParents = 1, numCores = numCores, output = output, selMat = selMat, parsScore = parsScore, intervMat = intervMat, intervData = intervData, intervMatVal = intervMatVal)
original_score_mat <- computeScoreMatTmp$scoreMatOriginal
original_score_mat_val <- computeScoreMatTmp$scoreMat_val
timeScoreMat <- timeScoreMat + proc.time()[3] - ptm
if(output)
{
cat("Object size of computeScoreMatTmp: ", object.size(computeScoreMatTmp), "\n" )
}
# We need the pathMatrix (entry (i,j) being one means that there is a directed path from i to j) in order to keep track of possible cycles.
pathMatrix <- matrix(0,p,p)
diag(pathMatrix) <- rep(1,p)
Adj <- as(matrix(0,p,p), "sparseMatrix")
scoreNodes <- computeScoreMatTmp$scoreEmptyNodes
scoreNodes_val <- computeScoreMatTmp$scoreEmptyNodes_val
i <- 0
# Greedily adding edges
while(sum(computeScoreMatTmp$scoreMat!=-Inf) > 0)
{
# Find the best edge
ptm <- proc.time()[3]
ix_max <- arrayInd(which.max(computeScoreMatTmp$scoreMat), dim(computeScoreMatTmp$scoreMat))
ix_max_backward <- matrix(c(ix_max[2],ix_max[1]),1,2)
if(i == 0){
order_ix_max <- data.frame(t(ix_max))
}else{
order_ix_max <- cbind(order_ix_max, t(ix_max))
}
timeMax <- timeMax + proc.time()[3] - ptm
Adj[ix_max] <- 1
scoreNodes[ix_max[2]] <- scoreNodes[ix_max[2]] + computeScoreMatTmp$scoreMat[ix_max]
scoreNodes_val[ix_max[2]] <- scoreNodes_val[ix_max[2]] + computeScoreMatTmp$scoreMat_val[ix_max]
if(output)
{
cat("\n Included edge (from, to) ", ix_max, "\n")
}
# Do not include the same edge twice.
computeScoreMatTmp$scoreMat[ix_max] <- -Inf
computeScoreMatTmp$scoreMat_val[ix_max] <- -Inf
# Avoid cycles
ptm <- proc.time()[3]
pathMatrix[ix_max[1],ix_max[2]] <- 1
DescOfNewChild <- which(pathMatrix[ix_max[2],]==1)
AncOfNewParent <- which(pathMatrix[,ix_max[1]]==1)
pathMatrix[AncOfNewParent,DescOfNewChild] <- 1
computeScoreMatTmp$scoreMat[t(pathMatrix) == 1] <- -Inf
computeScoreMatTmp$scoreMat[ix_max[2],ix_max[1]] <- -Inf
computeScoreMatTmp$scoreMat_val[t(pathMatrix) == 1] <- -Inf
computeScoreMatTmp$scoreMat_val[ix_max[2],ix_max[1]] <- -Inf
timeCycle <- timeCycle + proc.time()[3] - ptm
# Record the score of the current graph
scoreVec <- c(scoreVec, sum(scoreNodes))
scoreVec_val <- c(scoreVec_val, sum(scoreNodes_val))
# Record which edge has been added
edgeList <- rbind(edgeList, ix_max, deparse.level=0)
# Update the score of column j
ptm <- proc.time()[3]
updates <- updateScoreMat_with_score(computeScoreMatTmp$scoreMat, original_score_mat, computeScoreMatTmp$scoreMat_val, X, X_val, scoreName = scoreName, ix_max[1], ix_max[2], scoreNodes, scoreNodes_val, Adj, numCores=numCores, output = output, maxNumParents = maxNumParents, parsScore = parsScore, intervMat = intervMat, intervData = intervData, intervMatVal = intervMatVal)
computeScoreMatTmp$scoreMat <- updates$scoreMat
original_score_mat <- updates$scoreMat_original
computeScoreMatTmp$scoreMat_val <- updates$scoreMat_val
timeUpdate <- timeUpdate + proc.time()[3] - ptm
counterUpdate <- counterUpdate + 1
i <- i + 1
}
# print("Score before pruning:")
# print(tail(scoreVec, n=1))
# print(tail(scoreVec_val, n=1))
Adj_prepruned <- as.matrix(Adj)
####
# STEP 3: Prune the DAG
####
if(is_pruning)
{
# if(intervData)
# {
# X2 <- X[rowSums(intervMat) == 0,]
# cat("The preliminary neighbourhood selection is done with the observational data only.\n")
# } else
# {
# X2 <- X
# }
X2 <- X
if(output)
{
cat("\n Performing pruning ... \n ")
}
ptm <- proc.time()[3]
Adj <- pruning(X=X2,G=Adj,pruneMethod = pruneMethod, pruneMethodPars = pruneMethodPars, output=output, intervention={INTERVENTION}, interv_mat = intervMat)
Adj_tmp <- as(matrix(0,p,p), "sparseMatrix")
computeScoreMatTmp <- computeScoreMat_with_score(X, X_val, scoreName=scoreName, numParents = 1, numCores = numCores, output = output, selMat = selMat, parsScore = parsScore, intervMat = intervMat, intervData = intervData, intervMatVal = intervMatVal)
original_score_mat <- computeScoreMatTmp$scoreMatOriginal
original_score_mat_val <- computeScoreMatTmp$scoreMat_val
timePrune <- timePrune + proc.time()[3] - ptm
#Recompute the score after pruning
scoreNodes <- computeScoreMatTmp$scoreEmptyNodes
scoreNodes_val <- computeScoreMatTmp$scoreEmptyNodes_val
scoreVec <- c()
scoreVec_val <- c()
for(i in 1:dim(order_ix_max)[2]){
ix_max <- order_ix_max[,i]
if(Adj[ix_max[1], ix_max[2]] == 1){
Adj_tmp[ix_max[1], ix_max[2]] <- 1
ix_max_backward <- matrix(c(ix_max[2],ix_max[1]),1,2)
scoreNodes[ix_max[2]] <- scoreNodes[ix_max[2]] + computeScoreMatTmp$scoreMat[ix_max[1], ix_max[2]]
scoreNodes_val[ix_max[2]] <- scoreNodes_val[ix_max[2]] + computeScoreMatTmp$scoreMat_val[ix_max[1], ix_max[2]]
# Record the score of the current graph
scoreVec <- c(scoreVec, sum(scoreNodes))
scoreVec_val <- c(scoreVec_val, sum(scoreNodes_val))
# Update the score of column j
updates <- updateScoreMat_with_score(computeScoreMatTmp$scoreMat, original_score_mat, computeScoreMatTmp$scoreMat_val, X, X_val, scoreName = scoreName, ix_max[1], ix_max[2], scoreNodes, scoreNodes_val, Adj_tmp, numCores=numCores, output = output, maxNumParents = maxNumParents, parsScore = parsScore, intervMat = intervMat, intervData = intervData, intervMatVal = intervMatVal)
computeScoreMatTmp$scoreMat <- updates$scoreMat
original_score_mat <- updates$scoreMat_original
computeScoreMatTmp$scoreMat_val <- updates$scoreMat_val
}
}
}
# print("Score after pruning:")
# print(tail(scoreVec, n=1))
# print(tail(scoreVec_val, n=1))
final_score <- tail(scoreVec, n=1)
final_score_val <- tail(scoreVec_val, n=1)
show(paste("number of edges: ", sum(Adj), sep=""))
####
# Output and return
####
timeTotal <- timeSel + timeScoreMat + timeCycle + timeUpdate + timeMax + timePrune
if(output)
{
cat("amount of time for variable selection:",timeSel,"\n")
cat("amount of time computing the initial scoreMat:",timeScoreMat,"\n")
cat("amount of time checking for cycles:",timeCycle,"\n")
cat("amount of time computing updates for the scoreMat:",timeUpdate,", doing",counterUpdate,"updates.\n")
cat("amount of time for pruning:",timePrune,"\n")
cat("amount of time for finding maximum:",timeMax,"\n")
cat("amount of time in total:",timeTotal,"\n")
}
result <- list(Adj = Adj, Score = sum(scoreNodes), timesVec = c(timeSel, timeScoreMat, timeCycle, timeUpdate, timePrune, timeMax, timeTotal), scoreVec = scoreVec, edgeList = edgeList, final_score = final_score, final_score_val = final_score_val)
return(result)
}
computeScoreMat_with_score <-
function(X, X_val, scoreName, numParents, output, numCores, selMat, parsScore, intervMat, intervData, intervMatVal = NA)
{
  # Build the initial (train, validation) score matrices for the greedy search.
  #
  # numParents indicates how many parents we consider. If numParents = 1 (default),
  # the score matrix is of dimension (p-1) x p; if numParents = 2 it is
  # (p-1)(p-2) x p, and so on.
  #
  # scoreMat[i,j] equals the GAIN in score if we consider i being a parent of j,
  # i.e. the absolute score minus the score of j with no parents; it should
  # therefore be positive for useful edges.
  #
  # Returns a list with:
  #   scoreMat / scoreMat_val      - gain matrices (train / validation)
  #   scoreMatOriginal             - absolute (un-shifted) train scores
  #   rowParents                   - candidate parent sets, one per row
  #   scoreEmptyNodes(_val)        - -log(variance) of each node, the empty-model score
  p <- dim(X)[2]
  # each row of rowParents is one candidate parent set
  rowParents <- t(combn(p, numParents))
  tt <- expand.grid(1:dim(rowParents)[1], 1:p)
  allNode2 <- tt[, 2]
  allI <- tt[, 1]
  if(numCores == 1)
  {
    scoreMat <- mapply(computeScoreMatParallel_with_score, MoreArgs = list(rowParents = rowParents, selMat = selMat, scoreName = scoreName, X = X, X_val = X_val, output = output, parsScore = parsScore, intervMat = intervMat, intervData = intervData, intervMatVal = intervMatVal), node2 = allNode2, i = allI)
  } else
  {
    scoreMat <- mcmapply(computeScoreMatParallel_with_score, MoreArgs = list(rowParents = rowParents, selMat = selMat, scoreName = scoreName, X = X, X_val = X_val, output = output, parsScore = parsScore, intervMat = intervMat, intervData = intervData, intervMatVal = intervMatVal), node2 = allNode2, i = allI, mc.cores = numCores)
  }
  if(output)
  {
    # BUGFIX: this debug print used to run unconditionally; it is now gated on `output`
    print(dim(scoreMat))
  }
  # mapply returns a 2 x (nSets*p) list-matrix with rows score_train / score_val
  scoreMat_train <- matrix(unlist(scoreMat[1,]), dim(rowParents)[1], p)
  scoreMat_train_original <- matrix(unlist(scoreMat[1,]), dim(rowParents)[1], p)
  scoreMat_val <- matrix(unlist(scoreMat[2,]), dim(rowParents)[1], p)
  # initScore[i] is the empty-model score of node i, i.e. -log(variance of X[,i])
  initScore <- rep(NA, p)
  for(i in 1:p)
  {
    if(intervData)
    {
      # use only samples where node i was NOT intervened on
      X2 <- X[!intervMat[,i],]
    } else
    {
      X2 <- X
    }
    vartmp <- var(X2[,i])
    initScore[i] <- -log(vartmp)
    # turn absolute scores into GAINS over the empty model of node i
    scoreMat_train[,i] <- scoreMat_train[,i] - initScore[i]
  }
  # same shift for the validation scores, using the validation data
  initScore_val <- rep(NA, p)
  for(i in 1:p)
  {
    if(intervData)
    {
      X2 <- X_val[!intervMatVal[,i],]
    } else
    {
      X2 <- X_val
    }
    vartmp_val <- var(X2[,i])
    initScore_val[i] <- -log(vartmp_val)
    scoreMat_val[,i] <- scoreMat_val[,i] - initScore_val[i]
  }
  return(list(scoreMat = scoreMat_train, scoreMatOriginal = scoreMat_train_original, scoreMat_val = scoreMat_val, rowParents = rowParents, scoreEmptyNodes = initScore, scoreEmptyNodes_val = initScore_val))
}
computeScoreMatParallel_with_score <-
function(rowParents, scoreName, X, X_val, selMat, output, node2, i, parsScore, intervMat, intervData, update=FALSE, intervMatVal = NA)
{
  # Scores one candidate parent set for node2, on both training and
  # validation data. Returns list(score_train, score_val); both are -Inf if
  # the candidate set is invalid (contains node2 itself, or is excluded by
  # selMat).
  # The i-th row of rowParents contains possible parents of node2
  # (we call them "parentsToCheck").
  parentsToCheck <- rowParents[i,]
  if(output)
  {
    cat("\r compute score entry for regressing",node2,"on",parentsToCheck," \r")
  }
  if(intervData)
  {
    # keep only samples where node2 was NOT intervened on
    X2 <- X[!intervMat[,node2],]
    X_val2 <- X_val[!intervMatVal[,node2],]
  } else
  {
    X2 <- X
    X_val2 <- X_val
  }
  if(!(node2 %in% parentsToCheck) && (prod(selMat[parentsToCheck,node2]) == TRUE))
  {
    if(scoreName == "SEMSEV")
    {
      stop("This score does not work. It does not decouple.")
    } else if(scoreName == "SEMIND")
    {
      stop("NOT IMPLEMENTED YET")
    } else if(scoreName == "SEMGAM")
    {
      # fit an additive (GAM) regression of node2 on the candidate parents
      mod_gam <- train_gam_with_score(X2[,parentsToCheck],X2[,node2],pars=parsScore)
      score <- (-log(var(mod_gam$residuals)))
      # reconstruct the variable names used when fitting, so the validation
      # data frame matches the model formula
      # NOTE(review): counting "+" in the formula assumes one smooth term per
      # parent -- verify if parsScore ever produces a different formula shape
      nb_var <- count(mod_gam$formula, "+") + 1
      data_frame <- data.frame(X_val2[,parentsToCheck])
      var_name <- c()
      for (v in 1:nb_var){
        var_name <- c(var_name, paste("var", v+1, sep=""))
      }
      colnames(data_frame) <- c(var_name)
      val <- predict.gam(mod_gam$model, data_frame)
      residual <- X_val2[,node2] - val
      score_val <- (-log(var(residual)))
    } else if(scoreName == "SEMLIN")
    {
      if(intervData){
        stop("Not implemented for interventions. Use SEMGAM")
      }
      mod_lin <- train_linear(X2[,parentsToCheck],X2[,node2])
      score <- (-log(var(mod_lin$residuals)))
      # BUGFIX: score_val was never assigned on this branch, so the return
      # below raised "object 'score_val' not found". No validation score is
      # computed for SEMLIN, so report NA explicitly.
      score_val <- NA_real_
    } else if(scoreName == "SEMGP")
    {
      if(intervData){
        stop("Not implemented for interventions. Use SEMGAM")
      }
      mod_gp <- train_gp(X2[,parentsToCheck],X2[,node2])
      score <- (-log(var(mod_gp$residuals)))
      # BUGFIX: same missing-score_val defect as SEMLIN above
      score_val <- NA_real_
    } else
    {
      stop("I do not know this score function.")
    }
  } else
  {
    # invalid candidate set: never pick this entry in the greedy search
    score <- (-Inf)
    score_val <- (-Inf)
  }
  return(list(score_train = score, score_val = score_val))
}
# Count the (non-overlapping) occurrences of the literal string `needle`
# inside `haystack`. Returns 0 when there is no match.
count <- function(haystack, needle) {
  matches <- gregexpr(needle, haystack, fixed = TRUE)[[1]]
  match_lengths <- attr(matches, "match.length")
  # gregexpr signals "no match" with a single -1L match length
  if (identical(match_lengths, -1L)) {
    0
  } else {
    length(match_lengths)
  }
}
# Fit a GAM of y on the columns of X (one smooth term per column) and return
# the fit together with the formula string, so that the caller can later
# rebuild a matching data frame for prediction on validation data.
# Requires mgcv's gam() to be attached (loaded here via the CAM package).
train_gam_with_score <-
function(X,y,pars = list(numBasisFcts = 10))
{
# default number of spline basis functions per smooth term
if(!("numBasisFcts" %in% names(pars) ))
{
pars$numBasisFcts = 10
}
p <- dim(as.matrix(X))
# shrink the basis if there are too few samples per basis function
if(p[1]/p[2] < 3*pars$numBasisFcts)
{
pars$numBasisFcts <- ceiling(p[1]/(3*p[2]))
cat("changed number of basis functions to ", pars$numBasisFcts, " in order to have enough samples per basis function\n")
}
# column 1 is the response ("var1"), columns 2..p+1 are the predictors
dat <- data.frame(as.matrix(y),as.matrix(X))
coln <- rep("null",p[2]+1)
for(i in 1:(p[2]+1))
{
coln[i] <- paste("var",i,sep="")
}
colnames(dat) <- coln
# build the formula "var1 ~ s(var2,k=..) + s(var3,k=..) + ..."
labs<-"var1 ~ "
if(p[2] > 1)
{
for(i in 2:p[2])
{
labs<-paste(labs,"s(var",i,",k = ",pars$numBasisFcts,") + ",sep="")
}
}
labs<-paste(labs,"s(var",p[2]+1,",k = ",pars$numBasisFcts,")",sep="")
# mod_gam stays logical FALSE if gam() errors; that is the failure sentinel
mod_gam <- FALSE
try(mod_gam <- gam(formula=formula(labs), data=dat),silent = TRUE)
if(typeof(mod_gam) == "logical")
{
# fallback: refit with the smoothing parameter pinned to zero (sp=0)
cat("There was some error with gam. The smoothing parameter is set to zero.\n")
labs<-"var1 ~ "
if(p[2] > 1)
{
for(i in 2:p[2])
{
labs<-paste(labs,"s(var",i,",k = ",pars$numBasisFcts,",sp=0) + ",sep="")
}
}
labs<-paste(labs,"s(var",p[2]+1,",k = ",pars$numBasisFcts,",sp=0)",sep="")
mod_gam <- gam(formula=formula(labs), data=dat)
}
# package fitted values, residuals, the model object, and the formula string
result <- list()
result$Yfit <- as.matrix(mod_gam$fitted.values)
result$residuals <- as.matrix(mod_gam$residuals)
result$model <- mod_gam
result$df <- mod_gam$df.residual
result$edf <- mod_gam$edf
result$edf1 <- mod_gam$edf1
result$formula <- labs
# for degree of freedom see mod_gam$df.residual
# for aic see mod_gam$aic
return(result)
}
updateScoreMat_with_score <-
function(scoreMat, scoreMat_train_original, scoreMat_val, X, X_val, scoreName, i, j, scoreNodes, scoreNodes_val, Adj, output, numCores, maxNumParents, parsScore, intervMat, intervData, intervMatVal = NA)
# new edge: from i to j
{
  # After adding the edge i -> j, recompute column j of the gain matrices:
  # for every still-allowed candidate parent, score j regressed on
  # (existing parents of j) + candidate, then store the gain relative to
  # node j's current score. Returns the three updated matrices.
  p <- dim(X)[2]
  # current parents of j in the working graph
  existingParOfJ <- which(Adj[,j] == 1)
  # candidates already ruled out (cycle-blocked / previously disallowed)
  notAllowedParOfJ <- setdiff(which(scoreMat[,j] == -Inf), c(existingParOfJ,j))
  # only recompute if at least one candidate parent of j is still open
  if(length(existingParOfJ) + length(notAllowedParOfJ) < p-1)
  {
    # row r of rowParents = existing parents of j plus candidate r
    rowParents <- matrix(c(existingParOfJ,NA), p, length(existingParOfJ)+1, byrow = TRUE)
    rowParents[,length(existingParOfJ)+1] <- 1:p
    toUpdate <- setdiff(1:p, c(j,existingParOfJ,notAllowedParOfJ))
    if(length(existingParOfJ) < maxNumParents)
    {
      if(numCores == 1)
      {
        scoreUpdate <- mapply(computeScoreMatParallel_with_score,MoreArgs = list(rowParents = rowParents, selMat = matrix(TRUE,p,p), scoreName = scoreName, X = X, X_val = X_val, output = output, node2 = j, parsScore = parsScore, intervMat = intervMat, intervData = intervData, update=TRUE, intervMatVal = intervMatVal), i = toUpdate)
      } else
      {
        scoreUpdate <- mcmapply(computeScoreMatParallel_with_score,MoreArgs = list(rowParents = rowParents, selMat = matrix(TRUE,p,p), scoreName = scoreName, X = X, X_val = X_val, output = output, node2 = j, parsScore = parsScore, intervMat = intervMat, intervData = intervData, update=TRUE, intervMatVal = intervMatVal), i = toUpdate, mc.cores = numCores)
      }
      # mapply yields a 2 x length(toUpdate) list-matrix; pull both rows out
      # into plain numeric vectors (BUGFIX: the old code assigned into
      # `scoreUpdate$score` on this matrix, which is not a valid list access)
      newScore <- unlist(scoreUpdate['score_train',])
      newScoreVal <- unlist(scoreUpdate['score_val',])
    } else
    {
      # BUGFIX: the old else-branch wrote to `scoreUpdate$score` although
      # `scoreUpdate` did not exist here, raising "object not found" as soon
      # as node j reached maxNumParents. -Inf blocks any further parent of j.
      newScore <- rep(-Inf, length(toUpdate))
      newScoreVal <- rep(-Inf, length(toUpdate))
    }
    # absolute score, and gain relative to node j's current score
    scoreMat_train_original[toUpdate,j] <- newScore
    scoreMat[toUpdate,j] <- newScore - scoreNodes[j]
    scoreMat_val[toUpdate,j] <- newScoreVal - scoreNodes_val[j]
  }
  return(list(scoreMat = scoreMat, scoreMat_original = scoreMat_train_original, scoreMat_val = scoreMat_val))
}
# ---- Driver script ----
# NOTE(review): tokens in curly braces ({FOLDER}, {SCORE}, {NJOBS}, ...) look
# like template placeholders substituted by an external caller before this
# script is executed -- confirm against the generating code.
set.seed(42)
# load the datasets
dataset <- read.csv(file='{FOLDER}{FILE_TRAIN}', sep=",", header=FALSE);
dataset_val <- read.csv(file='{FOLDER}{FILE_VALID}', sep=",", header=FALSE);
if({INTERVENTION}){
show("Using intervention")
# load the intervention matrices
mask <- read.csv(file='{FOLDER}{TARGETS_TRAIN}', sep=",", header=FALSE)
mask_val <- read.csv(file='{FOLDER}{TARGETS_VALID}', sep=",", header=FALSE)
# masks mark observational entries; negate to get intervention indicators
interv_mat <- ! mask
interv_mat_val <- ! mask_val
estDAG <- CAM_with_score(dataset, X_val = dataset_val, intervData = TRUE, intervMat = interv_mat, intervMatVal = interv_mat_val, scoreName = "{SCORE}",
numCores = {NJOBS}, output = {VERBOSE}, variableSel = {VARSEL}, variableSelMethod = {SELMETHOD},
is_pruning = {PRUNING}, pruneMethod = {PRUNMETHOD}, pruneMethodPars = list(cutOffPVal = {CUTOFF}))
}else{
show("Not using intervention")
estDAG <- CAM_with_score(dataset, X_val = dataset_val, scoreName = "{SCORE}", numCores = {NJOBS}, output = {VERBOSE},
variableSel = {VARSEL}, variableSelMethod = {SELMETHOD}, is_pruning = {PRUNING},
pruneMethod = {PRUNMETHOD}, pruneMethodPars = list(cutOffPVal = {CUTOFF}))
}
# save estimated DAG
write.csv(as.matrix(estDAG$Adj),row.names = FALSE, file = '{FOLDER}{OUTPUT}');
# save scores (train and val)
scores <- c(estDAG$final_score, estDAG$final_score_val)
write.csv(scores, row.names = FALSE, file = '{FOLDER}{OUTPUT2}');
| /cam/cam_with_score.R | permissive | jcai-sc/dcdi | R | false | false | 25,477 | r | # MIT License
#
# Copyright (c) 2018 Diviyan Kalainathan
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
library(CAM)
# Select candidate parents of column k of X by fitting a GAM of X[,k] on all
# other columns and keeping those whose smooth term has a p-value below
# pars$cutOffPVal. Returns a logical vector of length ncol(X) (entry k is
# FALSE), or an empty list when X has a single column (original behavior kept).
selGam <-
function(X,pars = list(cutOffPVal = 0.001, numBasisFcts = 10),output = FALSE,k) {
  p <- dim(as.matrix(X))
  if(p[2] > 1) {
    selVec <- rep(FALSE, p[2])
    mod_gam <- CAM:::train_gam(X[,-k],as.matrix(X[,k]),pars)
    # p-values of the smooth terms, one per candidate parent
    # BUGFIX: this statement was accidentally duplicated; the redundant second
    # call to summary.gam() has been removed
    pValVec <- summary.gam(mod_gam$model)$s.pv
    if(output) {
      cat("vector of p-values:", pValVec, "\n")
    }
    if(length(pValVec) != length(selVec[-k])) {
      show("This should never happen (function selGam).")
    }
    selVec[-k] <- (pValVec < pars$cutOffPVal)
  } else {
    # single-column input: nothing to select
    selVec <- list()
  }
  return(selVec)
}
# for PNS
# Preliminary neighborhood selection (PNS) for column k of X using gradient
# boosting (CAM:::train_GAMboost): a candidate parent is kept when it was
# selected in at least pars$atLeastThatMuchSelected of the boosting steps, and
# at most pars$atMostThatManyNeighbors candidates are retained. Returns a
# logical vector of length ncol(X) (entry k is FALSE), or an empty list when X
# has a single column (original behavior kept).
selGamBoost <-
function(X,pars = list(atLeastThatMuchSelected = 0.02, atMostThatManyNeighbors = 10),output = FALSE,k) {
  if(output)
  {
    cat("Performing variable selection for variable", k, ": \n")
  }
  p <- dim(as.matrix(X))
  if(p[2] > 1)
  {
    selVec <- rep(FALSE, p[2])
    modfitGam <- CAM:::train_GAMboost(X[,-k],X[,k],pars)
    # distinct predictors the boosting model ever selected
    cc <- unique(modfitGam$model$xselect())
    if(output)
    {
      cat("The following variables \n")
      show(cc)
    }
    nstep <- length(modfitGam$model$xselect())
    # fraction of boosting steps in which each candidate was selected
    howOftenSelected <- rep(NA,length(cc))
    # BUGFIX: was 1:length(cc), which misbehaves when cc is empty
    for(idx in seq_along(cc))
    {
      howOftenSelected[idx] <- sum(modfitGam$model$xselect() == cc[idx])/nstep
    }
    if(output)
    {
      cat("... have been selected that many times: \n")
      show(howOftenSelected)
    }
    howOftenSelectedSorted <- sort(howOftenSelected, decreasing = TRUE)
    # cap the neighborhood size at atMostThatManyNeighbors
    if( sum(howOftenSelected>pars$atLeastThatMuchSelected) > pars$atMostThatManyNeighbors)
    {
      cc <- cc[howOftenSelected>howOftenSelectedSorted[pars$atMostThatManyNeighbors + 1]]
    } else
    {
      cc <- cc[howOftenSelected>pars$atLeastThatMuchSelected]
    }
    if(output)
    {
      cat("We finally choose as possible parents: \n")
      show(cc)
      cat("\n")
    }
    # cc indexes the candidate columns of X[,-k]; map back to full indexing
    tmp <- rep(FALSE,p[2]-1)
    tmp[cc] <- TRUE
    selVec[-k] <- tmp
  } else
  {
    # single-column input: nothing to select
    selVec <- list()
  }
  return(selVec)
}
# Re-test every edge of the adjacency matrix G: for each node, refit its set
# of parents with pruneMethod (default selGam) and keep only the parents that
# survive. When intervention == TRUE, rows of X where the node was intervened
# on are dropped before refitting (unless that would leave no rows).
# Returns the pruned adjacency matrix.
pruning <-
function(X, G, output = FALSE, pruneMethod = selGam, pruneMethodPars = list(cutOffPVal = 0.001, numBasisFcts = 10), intervention = FALSE, interv_mat = FALSE) {
  p <- dim(G)[1]
  prunedG <- matrix(0, p, p)
  for (node in 1:p) {
    candidateParents <- which(G[, node] == 1)
    nParents <- length(candidateParents)
    if (output) {
      cat("pruning variable:", node, "\n")
      cat("considered parents:", candidateParents, "\n")
    }
    if (nParents == 0) next
    # use only the observational data of this node
    dataForFit <- X
    if (intervention == TRUE) {
      obsRows <- which(interv_mat[, node] == 0)
      # NOTE(review): if every row is interventional, all data are kept
      # (silent fallback) -- confirm this is intended
      if (length(obsRows) > 0) {
        dataForFit <- dataForFit[obsRows, ]
      }
    }
    # last column of the fit input is the response (node itself)
    fitInput <- cbind(dataForFit[, candidateParents], dataForFit[, node])
    keep <- pruneMethod(fitInput, k = nParents + 1, pars = pruneMethodPars, output = output)
    prunedG[candidateParents[keep], node] <- 1
  }
  return(prunedG)
}
# Greedy CAM (Causal Additive Model) structure search that tracks both a
# training score and a held-out validation score.
#
# X            - training data matrix (rows = samples, cols = variables)
# X_val        - validation data matrix with the same columns as X
# scoreName    - regression score: "SEMGAM", "SEMLIN" or "SEMGP"
# variableSel  - if TRUE, restrict candidate parents first (variableSelMethod)
# is_pruning   - if TRUE, prune the greedy DAG and recompute its scores
# intervData / intervMat / intervMatVal - interventional-sample masks
#
# Returns a list with the adjacency matrix, final train/validation scores,
# per-step score trajectories, the edge insertion order, and timing info.
#
# NOTE(review): the pruning() call below contains the literal {INTERVENTION}
# template placeholder -- this file appears to be a template that is filled in
# by an external driver before execution.
CAM_with_score <-
function(X, scoreName = "SEMGAM",
parsScore = list(numBasisFcts=10),
numCores = 1,
maxNumParents = min(dim(X)[2] - 1, round(dim(X)[1]/20)),
output = FALSE,
variableSel = FALSE,
variableSelMethod = selGamBoost,
variableSelMethodPars = list(atLeastThatMuchSelected = 0.02, atMostThatManyNeighbors = 10),
is_pruning = FALSE,
pruneMethod = selGam,
pruneMethodPars = list(cutOffPVal = 0.001, numBasisFcts=10),
intervData = FALSE,
intervMat = NA,
intervMatVal = NA,
X_val=NULL)
{
require(CAM)
if(output)
{
cat("number of cores:", numCores, "\n")
}
# We record the time consumption. They are shown if output == TRUE
timeCycle <- 0
timeUpdate <- 0
timeScoreMat <- 0
timeSel <- 0
timePrune <- 0
timeMax <- 0
# we record how the score develops
scoreVec <- integer(0)
scoreVec_val <- integer(0)
# and which edges are added
edgeList <- integer(0)
# this counter is only used if output = TRUE
counterUpdate <- 0
p <- dim(X)[2]
####
# STEP 1: variable selection
####
# A matrix selMat is constructed. Entry (i,j) being one means that i is a possible parent of j.
if(variableSel)
{
ptm <- proc.time()[3]
X2 <- X
if(numCores == 1)
{
selMat <- mapply(variableSelMethod,MoreArgs = list(X = X2, pars = variableSelMethodPars, output = output),1:p)
} else
{
selMat <- mcmapply(variableSelMethod,MoreArgs = list(X = X2, pars = variableSelMethodPars, output = output),1:p, mc.cores = numCores)
}
# The next line includes j as a possible parent of i if i is considered a possible parent of j
# selMat <- selMat | t(selMat)
# cou counts the size of the restricted search space, for reporting only
cou <- 0
for(jk in 1:p)
{
cou <- cou + 2^{sum(selMat[,jk])}
}
if(output)
{
cat("Instead of p2^(p-1) -Sillander- ",p*2^(p-1) ," we have ", cou, "\n")
cat("Greedy, on the other hand, is computing ",sum(selMat) ," entries. \n")
}
timeSel <- timeSel + proc.time()[3] - ptm
} else
{
selMat <- matrix(TRUE, p,p)
}
if(variableSel & output)
{
if(output)
{
if(p<30)
{
cat("This is the matrix of possible parents after the first step.\n")
show(selMat)
}
cat("Object size of selmat: ", object.size(selMat), "\n")
}
}
####
# STEP 2: Include Edges
####
# compute score matrix
ptm <- proc.time()[3]
computeScoreMatTmp <- computeScoreMat_with_score(X, X_val, scoreName=scoreName, numParents = 1, numCores = numCores, output = output, selMat = selMat, parsScore = parsScore, intervMat = intervMat, intervData = intervData, intervMatVal = intervMatVal)
original_score_mat <- computeScoreMatTmp$scoreMatOriginal
original_score_mat_val <- computeScoreMatTmp$scoreMat_val
timeScoreMat <- timeScoreMat + proc.time()[3] - ptm
if(output)
{
cat("Object size of computeScoreMatTmp: ", object.size(computeScoreMatTmp), "\n" )
}
# We need the pathMatrix (entry (i,j) being one means that there is a directed path from i to j) in order to keep track of possible cycles.
pathMatrix <- matrix(0,p,p)
diag(pathMatrix) <- rep(1,p)
Adj <- as(matrix(0,p,p), "sparseMatrix")
scoreNodes <- computeScoreMatTmp$scoreEmptyNodes
scoreNodes_val <- computeScoreMatTmp$scoreEmptyNodes_val
i <- 0
# Greedily adding edges
while(sum(computeScoreMatTmp$scoreMat!=-Inf) > 0)
{
# Find the best edge
ptm <- proc.time()[3]
ix_max <- arrayInd(which.max(computeScoreMatTmp$scoreMat), dim(computeScoreMatTmp$scoreMat))
ix_max_backward <- matrix(c(ix_max[2],ix_max[1]),1,2)
# remember the insertion order; it is replayed after pruning
if(i == 0){
order_ix_max <- data.frame(t(ix_max))
}else{
order_ix_max <- cbind(order_ix_max, t(ix_max))
}
timeMax <- timeMax + proc.time()[3] - ptm
Adj[ix_max] <- 1
scoreNodes[ix_max[2]] <- scoreNodes[ix_max[2]] + computeScoreMatTmp$scoreMat[ix_max]
scoreNodes_val[ix_max[2]] <- scoreNodes_val[ix_max[2]] + computeScoreMatTmp$scoreMat_val[ix_max]
if(output)
{
cat("\n Included edge (from, to) ", ix_max, "\n")
}
# Do not include the same edge twice.
computeScoreMatTmp$scoreMat[ix_max] <- -Inf
computeScoreMatTmp$scoreMat_val[ix_max] <- -Inf
# Avoid cycles
ptm <- proc.time()[3]
pathMatrix[ix_max[1],ix_max[2]] <- 1
DescOfNewChild <- which(pathMatrix[ix_max[2],]==1)
AncOfNewParent <- which(pathMatrix[,ix_max[1]]==1)
pathMatrix[AncOfNewParent,DescOfNewChild] <- 1
# any edge that would close a directed cycle is blocked with -Inf
computeScoreMatTmp$scoreMat[t(pathMatrix) == 1] <- -Inf
computeScoreMatTmp$scoreMat[ix_max[2],ix_max[1]] <- -Inf
computeScoreMatTmp$scoreMat_val[t(pathMatrix) == 1] <- -Inf
computeScoreMatTmp$scoreMat_val[ix_max[2],ix_max[1]] <- -Inf
timeCycle <- timeCycle + proc.time()[3] - ptm
# Record the score of the current graph
scoreVec <- c(scoreVec, sum(scoreNodes))
scoreVec_val <- c(scoreVec_val, sum(scoreNodes_val))
# Record which edge has been added
edgeList <- rbind(edgeList, ix_max, deparse.level=0)
# Update the score of column j
ptm <- proc.time()[3]
updates <- updateScoreMat_with_score(computeScoreMatTmp$scoreMat, original_score_mat, computeScoreMatTmp$scoreMat_val, X, X_val, scoreName = scoreName, ix_max[1], ix_max[2], scoreNodes, scoreNodes_val, Adj, numCores=numCores, output = output, maxNumParents = maxNumParents, parsScore = parsScore, intervMat = intervMat, intervData = intervData, intervMatVal = intervMatVal)
computeScoreMatTmp$scoreMat <- updates$scoreMat
original_score_mat <- updates$scoreMat_original
computeScoreMatTmp$scoreMat_val <- updates$scoreMat_val
timeUpdate <- timeUpdate + proc.time()[3] - ptm
counterUpdate <- counterUpdate + 1
i <- i + 1
}
# print("Score before pruning:")
# print(tail(scoreVec, n=1))
# print(tail(scoreVec_val, n=1))
Adj_prepruned <- as.matrix(Adj)
####
# STEP 3: Prune the DAG
####
if(is_pruning)
{
# if(intervData)
# {
# X2 <- X[rowSums(intervMat) == 0,]
# cat("The preliminary neighbourhood selection is done with the observational data only.\n")
# } else
# {
# X2 <- X
# }
X2 <- X
if(output)
{
cat("\n Performing pruning ... \n ")
}
ptm <- proc.time()[3]
Adj <- pruning(X=X2,G=Adj,pruneMethod = pruneMethod, pruneMethodPars = pruneMethodPars, output=output, intervention={INTERVENTION}, interv_mat = intervMat)
# rebuild the score matrices from scratch and replay the surviving edges in
# their original insertion order to get the post-pruning scores
Adj_tmp <- as(matrix(0,p,p), "sparseMatrix")
computeScoreMatTmp <- computeScoreMat_with_score(X, X_val, scoreName=scoreName, numParents = 1, numCores = numCores, output = output, selMat = selMat, parsScore = parsScore, intervMat = intervMat, intervData = intervData, intervMatVal = intervMatVal)
original_score_mat <- computeScoreMatTmp$scoreMatOriginal
original_score_mat_val <- computeScoreMatTmp$scoreMat_val
timePrune <- timePrune + proc.time()[3] - ptm
#Recompute the score after pruning
scoreNodes <- computeScoreMatTmp$scoreEmptyNodes
scoreNodes_val <- computeScoreMatTmp$scoreEmptyNodes_val
scoreVec <- c()
scoreVec_val <- c()
for(i in 1:dim(order_ix_max)[2]){
ix_max <- order_ix_max[,i]
if(Adj[ix_max[1], ix_max[2]] == 1){
Adj_tmp[ix_max[1], ix_max[2]] <- 1
ix_max_backward <- matrix(c(ix_max[2],ix_max[1]),1,2)
scoreNodes[ix_max[2]] <- scoreNodes[ix_max[2]] + computeScoreMatTmp$scoreMat[ix_max[1], ix_max[2]]
scoreNodes_val[ix_max[2]] <- scoreNodes_val[ix_max[2]] + computeScoreMatTmp$scoreMat_val[ix_max[1], ix_max[2]]
# Record the score of the current graph
scoreVec <- c(scoreVec, sum(scoreNodes))
scoreVec_val <- c(scoreVec_val, sum(scoreNodes_val))
# Update the score of column j
updates <- updateScoreMat_with_score(computeScoreMatTmp$scoreMat, original_score_mat, computeScoreMatTmp$scoreMat_val, X, X_val, scoreName = scoreName, ix_max[1], ix_max[2], scoreNodes, scoreNodes_val, Adj_tmp, numCores=numCores, output = output, maxNumParents = maxNumParents, parsScore = parsScore, intervMat = intervMat, intervData = intervData, intervMatVal = intervMatVal)
computeScoreMatTmp$scoreMat <- updates$scoreMat
original_score_mat <- updates$scoreMat_original
computeScoreMatTmp$scoreMat_val <- updates$scoreMat_val
}
}
}
# print("Score after pruning:")
# print(tail(scoreVec, n=1))
# print(tail(scoreVec_val, n=1))
final_score <- tail(scoreVec, n=1)
final_score_val <- tail(scoreVec_val, n=1)
show(paste("number of edges: ", sum(Adj), sep=""))
####
# Output and return
####
timeTotal <- timeSel + timeScoreMat + timeCycle + timeUpdate + timeMax + timePrune
if(output)
{
cat("amount of time for variable selection:",timeSel,"\n")
cat("amount of time computing the initial scoreMat:",timeScoreMat,"\n")
cat("amount of time checking for cycles:",timeCycle,"\n")
cat("amount of time computing updates for the scoreMat:",timeUpdate,", doing",counterUpdate,"updates.\n")
cat("amount of time for pruning:",timePrune,"\n")
cat("amount of time for finding maximum:",timeMax,"\n")
cat("amount of time in total:",timeTotal,"\n")
}
result <- list(Adj = Adj, Score = sum(scoreNodes), timesVec = c(timeSel, timeScoreMat, timeCycle, timeUpdate, timePrune, timeMax, timeTotal), scoreVec = scoreVec, edgeList = edgeList, final_score = final_score, final_score_val = final_score_val)
return(result)
}
computeScoreMat_with_score <-
function(X, X_val, scoreName, numParents, output, numCores, selMat, parsScore, intervMat, intervData, intervMatVal = NA)
{
  # Build the initial (train, validation) score matrices for the greedy search.
  #
  # numParents indicates how many parents we consider. If numParents = 1 (default),
  # the score matrix is of dimension (p-1) x p; if numParents = 2 it is
  # (p-1)(p-2) x p, and so on.
  #
  # scoreMat[i,j] equals the GAIN in score if we consider i being a parent of j,
  # i.e. the absolute score minus the score of j with no parents; it should
  # therefore be positive for useful edges.
  #
  # Returns a list with:
  #   scoreMat / scoreMat_val      - gain matrices (train / validation)
  #   scoreMatOriginal             - absolute (un-shifted) train scores
  #   rowParents                   - candidate parent sets, one per row
  #   scoreEmptyNodes(_val)        - -log(variance) of each node, the empty-model score
  p <- dim(X)[2]
  # each row of rowParents is one candidate parent set
  rowParents <- t(combn(p, numParents))
  tt <- expand.grid(1:dim(rowParents)[1], 1:p)
  allNode2 <- tt[, 2]
  allI <- tt[, 1]
  if(numCores == 1)
  {
    scoreMat <- mapply(computeScoreMatParallel_with_score, MoreArgs = list(rowParents = rowParents, selMat = selMat, scoreName = scoreName, X = X, X_val = X_val, output = output, parsScore = parsScore, intervMat = intervMat, intervData = intervData, intervMatVal = intervMatVal), node2 = allNode2, i = allI)
  } else
  {
    scoreMat <- mcmapply(computeScoreMatParallel_with_score, MoreArgs = list(rowParents = rowParents, selMat = selMat, scoreName = scoreName, X = X, X_val = X_val, output = output, parsScore = parsScore, intervMat = intervMat, intervData = intervData, intervMatVal = intervMatVal), node2 = allNode2, i = allI, mc.cores = numCores)
  }
  if(output)
  {
    # BUGFIX: this debug print used to run unconditionally; it is now gated on `output`
    print(dim(scoreMat))
  }
  # mapply returns a 2 x (nSets*p) list-matrix with rows score_train / score_val
  scoreMat_train <- matrix(unlist(scoreMat[1,]), dim(rowParents)[1], p)
  scoreMat_train_original <- matrix(unlist(scoreMat[1,]), dim(rowParents)[1], p)
  scoreMat_val <- matrix(unlist(scoreMat[2,]), dim(rowParents)[1], p)
  # initScore[i] is the empty-model score of node i, i.e. -log(variance of X[,i])
  initScore <- rep(NA, p)
  for(i in 1:p)
  {
    if(intervData)
    {
      # use only samples where node i was NOT intervened on
      X2 <- X[!intervMat[,i],]
    } else
    {
      X2 <- X
    }
    vartmp <- var(X2[,i])
    initScore[i] <- -log(vartmp)
    # turn absolute scores into GAINS over the empty model of node i
    scoreMat_train[,i] <- scoreMat_train[,i] - initScore[i]
  }
  # same shift for the validation scores, using the validation data
  initScore_val <- rep(NA, p)
  for(i in 1:p)
  {
    if(intervData)
    {
      X2 <- X_val[!intervMatVal[,i],]
    } else
    {
      X2 <- X_val
    }
    vartmp_val <- var(X2[,i])
    initScore_val[i] <- -log(vartmp_val)
    scoreMat_val[,i] <- scoreMat_val[,i] - initScore_val[i]
  }
  return(list(scoreMat = scoreMat_train, scoreMatOriginal = scoreMat_train_original, scoreMat_val = scoreMat_val, rowParents = rowParents, scoreEmptyNodes = initScore, scoreEmptyNodes_val = initScore_val))
}
computeScoreMatParallel_with_score <-
function(rowParents, scoreName, X, X_val, selMat, output, node2, i, parsScore, intervMat, intervData, update=FALSE, intervMatVal = NA)
{
  # Scores one candidate parent set for node2, on both training and
  # validation data. Returns list(score_train, score_val); both are -Inf if
  # the candidate set is invalid (contains node2 itself, or is excluded by
  # selMat).
  # The i-th row of rowParents contains possible parents of node2
  # (we call them "parentsToCheck").
  parentsToCheck <- rowParents[i,]
  if(output)
  {
    cat("\r compute score entry for regressing",node2,"on",parentsToCheck," \r")
  }
  if(intervData)
  {
    # keep only samples where node2 was NOT intervened on
    X2 <- X[!intervMat[,node2],]
    X_val2 <- X_val[!intervMatVal[,node2],]
  } else
  {
    X2 <- X
    X_val2 <- X_val
  }
  if(!(node2 %in% parentsToCheck) && (prod(selMat[parentsToCheck,node2]) == TRUE))
  {
    if(scoreName == "SEMSEV")
    {
      stop("This score does not work. It does not decouple.")
    } else if(scoreName == "SEMIND")
    {
      stop("NOT IMPLEMENTED YET")
    } else if(scoreName == "SEMGAM")
    {
      # fit an additive (GAM) regression of node2 on the candidate parents
      mod_gam <- train_gam_with_score(X2[,parentsToCheck],X2[,node2],pars=parsScore)
      score <- (-log(var(mod_gam$residuals)))
      # reconstruct the variable names used when fitting, so the validation
      # data frame matches the model formula
      # NOTE(review): counting "+" in the formula assumes one smooth term per
      # parent -- verify if parsScore ever produces a different formula shape
      nb_var <- count(mod_gam$formula, "+") + 1
      data_frame <- data.frame(X_val2[,parentsToCheck])
      var_name <- c()
      for (v in 1:nb_var){
        var_name <- c(var_name, paste("var", v+1, sep=""))
      }
      colnames(data_frame) <- c(var_name)
      val <- predict.gam(mod_gam$model, data_frame)
      residual <- X_val2[,node2] - val
      score_val <- (-log(var(residual)))
    } else if(scoreName == "SEMLIN")
    {
      if(intervData){
        stop("Not implemented for interventions. Use SEMGAM")
      }
      mod_lin <- train_linear(X2[,parentsToCheck],X2[,node2])
      score <- (-log(var(mod_lin$residuals)))
      # BUGFIX: score_val was never assigned on this branch, so the return
      # below raised "object 'score_val' not found". No validation score is
      # computed for SEMLIN, so report NA explicitly.
      score_val <- NA_real_
    } else if(scoreName == "SEMGP")
    {
      if(intervData){
        stop("Not implemented for interventions. Use SEMGAM")
      }
      mod_gp <- train_gp(X2[,parentsToCheck],X2[,node2])
      score <- (-log(var(mod_gp$residuals)))
      # BUGFIX: same missing-score_val defect as SEMLIN above
      score_val <- NA_real_
    } else
    {
      stop("I do not know this score function.")
    }
  } else
  {
    # invalid candidate set: never pick this entry in the greedy search
    score <- (-Inf)
    score_val <- (-Inf)
  }
  return(list(score_train = score, score_val = score_val))
}
# Count the (non-overlapping) occurrences of the literal string `needle`
# inside `haystack`. Returns 0 when there is no match.
count <- function(haystack, needle) {
  matches <- gregexpr(needle, haystack, fixed = TRUE)[[1]]
  match_lengths <- attr(matches, "match.length")
  # gregexpr signals "no match" with a single -1L match length
  if (identical(match_lengths, -1L)) {
    0
  } else {
    length(match_lengths)
  }
}
# Fit a GAM of y on the columns of X (one smooth term per column) and return
# the fit together with the formula string, so that the caller can later
# rebuild a matching data frame for prediction on validation data.
# Requires mgcv's gam() to be attached (loaded here via the CAM package).
train_gam_with_score <-
function(X,y,pars = list(numBasisFcts = 10))
{
# default number of spline basis functions per smooth term
if(!("numBasisFcts" %in% names(pars) ))
{
pars$numBasisFcts = 10
}
p <- dim(as.matrix(X))
# shrink the basis if there are too few samples per basis function
if(p[1]/p[2] < 3*pars$numBasisFcts)
{
pars$numBasisFcts <- ceiling(p[1]/(3*p[2]))
cat("changed number of basis functions to ", pars$numBasisFcts, " in order to have enough samples per basis function\n")
}
# column 1 is the response ("var1"), columns 2..p+1 are the predictors
dat <- data.frame(as.matrix(y),as.matrix(X))
coln <- rep("null",p[2]+1)
for(i in 1:(p[2]+1))
{
coln[i] <- paste("var",i,sep="")
}
colnames(dat) <- coln
# build the formula "var1 ~ s(var2,k=..) + s(var3,k=..) + ..."
labs<-"var1 ~ "
if(p[2] > 1)
{
for(i in 2:p[2])
{
labs<-paste(labs,"s(var",i,",k = ",pars$numBasisFcts,") + ",sep="")
}
}
labs<-paste(labs,"s(var",p[2]+1,",k = ",pars$numBasisFcts,")",sep="")
# mod_gam stays logical FALSE if gam() errors; that is the failure sentinel
mod_gam <- FALSE
try(mod_gam <- gam(formula=formula(labs), data=dat),silent = TRUE)
if(typeof(mod_gam) == "logical")
{
# fallback: refit with the smoothing parameter pinned to zero (sp=0)
cat("There was some error with gam. The smoothing parameter is set to zero.\n")
labs<-"var1 ~ "
if(p[2] > 1)
{
for(i in 2:p[2])
{
labs<-paste(labs,"s(var",i,",k = ",pars$numBasisFcts,",sp=0) + ",sep="")
}
}
labs<-paste(labs,"s(var",p[2]+1,",k = ",pars$numBasisFcts,",sp=0)",sep="")
mod_gam <- gam(formula=formula(labs), data=dat)
}
# package fitted values, residuals, the model object, and the formula string
result <- list()
result$Yfit <- as.matrix(mod_gam$fitted.values)
result$residuals <- as.matrix(mod_gam$residuals)
result$model <- mod_gam
result$df <- mod_gam$df.residual
result$edf <- mod_gam$edf
result$edf1 <- mod_gam$edf1
result$formula <- labs
# for degree of freedom see mod_gam$df.residual
# for aic see mod_gam$aic
return(result)
}
updateScoreMat_with_score <-
function(scoreMat, scoreMat_train_original, scoreMat_val, X, X_val, scoreName, i, j, scoreNodes, scoreNodes_val, Adj, output, numCores, maxNumParents, parsScore, intervMat, intervData, intervMatVal = NA)
# new edge: from i to j
{
  # After adding the edge i -> j, recompute column j of the gain matrices:
  # for every still-allowed candidate parent, score j regressed on
  # (existing parents of j) + candidate, then store the gain relative to
  # node j's current score. Returns the three updated matrices.
  p <- dim(X)[2]
  # current parents of j in the working graph
  existingParOfJ <- which(Adj[,j] == 1)
  # candidates already ruled out (cycle-blocked / previously disallowed)
  notAllowedParOfJ <- setdiff(which(scoreMat[,j] == -Inf), c(existingParOfJ,j))
  # only recompute if at least one candidate parent of j is still open
  if(length(existingParOfJ) + length(notAllowedParOfJ) < p-1)
  {
    # row r of rowParents = existing parents of j plus candidate r
    rowParents <- matrix(c(existingParOfJ,NA), p, length(existingParOfJ)+1, byrow = TRUE)
    rowParents[,length(existingParOfJ)+1] <- 1:p
    toUpdate <- setdiff(1:p, c(j,existingParOfJ,notAllowedParOfJ))
    if(length(existingParOfJ) < maxNumParents)
    {
      if(numCores == 1)
      {
        scoreUpdate <- mapply(computeScoreMatParallel_with_score,MoreArgs = list(rowParents = rowParents, selMat = matrix(TRUE,p,p), scoreName = scoreName, X = X, X_val = X_val, output = output, node2 = j, parsScore = parsScore, intervMat = intervMat, intervData = intervData, update=TRUE, intervMatVal = intervMatVal), i = toUpdate)
      } else
      {
        scoreUpdate <- mcmapply(computeScoreMatParallel_with_score,MoreArgs = list(rowParents = rowParents, selMat = matrix(TRUE,p,p), scoreName = scoreName, X = X, X_val = X_val, output = output, node2 = j, parsScore = parsScore, intervMat = intervMat, intervData = intervData, update=TRUE, intervMatVal = intervMatVal), i = toUpdate, mc.cores = numCores)
      }
      # mapply yields a 2 x length(toUpdate) list-matrix; pull both rows out
      # into plain numeric vectors (BUGFIX: the old code assigned into
      # `scoreUpdate$score` on this matrix, which is not a valid list access)
      newScore <- unlist(scoreUpdate['score_train',])
      newScoreVal <- unlist(scoreUpdate['score_val',])
    } else
    {
      # BUGFIX: the old else-branch wrote to `scoreUpdate$score` although
      # `scoreUpdate` did not exist here, raising "object not found" as soon
      # as node j reached maxNumParents. -Inf blocks any further parent of j.
      newScore <- rep(-Inf, length(toUpdate))
      newScoreVal <- rep(-Inf, length(toUpdate))
    }
    # absolute score, and gain relative to node j's current score
    scoreMat_train_original[toUpdate,j] <- newScore
    scoreMat[toUpdate,j] <- newScore - scoreNodes[j]
    scoreMat_val[toUpdate,j] <- newScoreVal - scoreNodes_val[j]
  }
  return(list(scoreMat = scoreMat, scoreMat_original = scoreMat_train_original, scoreMat_val = scoreMat_val))
}
# ---- Driver script ----
# NOTE(review): tokens in curly braces ({FOLDER}, {SCORE}, {NJOBS}, ...) look
# like template placeholders substituted by an external caller before this
# script is executed -- confirm against the generating code.
set.seed(42)
# load the datasets
dataset <- read.csv(file='{FOLDER}{FILE_TRAIN}', sep=",", header=FALSE);
dataset_val <- read.csv(file='{FOLDER}{FILE_VALID}', sep=",", header=FALSE);
if({INTERVENTION}){
show("Using intervention")
# load the intervention matrices
mask <- read.csv(file='{FOLDER}{TARGETS_TRAIN}', sep=",", header=FALSE)
mask_val <- read.csv(file='{FOLDER}{TARGETS_VALID}', sep=",", header=FALSE)
# masks mark observational entries; negate to get intervention indicators
interv_mat <- ! mask
interv_mat_val <- ! mask_val
estDAG <- CAM_with_score(dataset, X_val = dataset_val, intervData = TRUE, intervMat = interv_mat, intervMatVal = interv_mat_val, scoreName = "{SCORE}",
numCores = {NJOBS}, output = {VERBOSE}, variableSel = {VARSEL}, variableSelMethod = {SELMETHOD},
is_pruning = {PRUNING}, pruneMethod = {PRUNMETHOD}, pruneMethodPars = list(cutOffPVal = {CUTOFF}))
}else{
show("Not using intervention")
estDAG <- CAM_with_score(dataset, X_val = dataset_val, scoreName = "{SCORE}", numCores = {NJOBS}, output = {VERBOSE},
variableSel = {VARSEL}, variableSelMethod = {SELMETHOD}, is_pruning = {PRUNING},
pruneMethod = {PRUNMETHOD}, pruneMethodPars = list(cutOffPVal = {CUTOFF}))
}
# save estimated DAG
write.csv(as.matrix(estDAG$Adj),row.names = FALSE, file = '{FOLDER}{OUTPUT}');
# save scores (train and val)
scores <- c(estDAG$final_score, estDAG$final_score_val)
write.csv(scores, row.names = FALSE, file = '{FOLDER}{OUTPUT2}');
|
require(shiny)
require(shinyjs)
require(r4ss)
require(plyr)
require(dplyr)
require(ggplot2)
require(reshape2)
require(data.table)
require(tidyr)
require(rlist)
require(viridis)
require(sss)
require(shinyWidgets)
require(shinyFiles)
require(HandyCode)
require(nwfscDiag)
require(shinybusy)
require(truncnorm)
require(flextable)
require(officer)
require(gridExtra)
require(ggpubr)
require(grid)
require(wesanderson)
require(adnuts)
require(shinystan)
#require(paletteer)
#require(RColorBrewer)
#require(ggthemes)
#devtools::load_all("C:/Users/Jason.Cope/Documents/Github/nwfscDiag")
source('Functions.r',local = FALSE)
# ggplot2 theme used for all report figures: light base theme with grid lines
# removed, black facet-strip text, bordered panels, and transparent legend.
theme_report <- function(base_size = 11) {
  tick_length <- base_size / 2 / 2.2
  report_overrides <- theme(
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    axis.ticks.length = unit(tick_length, "pt"),
    strip.background = element_rect(fill = NA, colour = NA),
    strip.text.x = element_text(colour = "black"),
    strip.text.y = element_text(colour = "black"),
    panel.border = element_rect(fill = NA),
    legend.key.size = unit(0.9, "lines"),
    legend.key = element_rect(colour = NA, fill = NA),
    legend.background = element_rect(colour = NA, fill = NA)
  )
  theme_light(base_size = base_size) + report_overrides
}
# Make this the session-wide default theme.
theme_set(theme_report())
shinyServer(function(input, output,session) {
useShinyjs()
# Re-declare the report theme inside the server scope so each session applies
# the same defaults: no grid, black strip text, panel border, clear legend.
theme_report <- function(base_size = 11) {
  base_theme <- theme_light(base_size = base_size)
  base_theme +
    theme(
      panel.grid.major = element_blank(),
      panel.grid.minor = element_blank(),
      axis.ticks.length = unit((base_size / 2) / 2.2, "pt"),
      strip.background = element_rect(fill = NA, colour = NA),
      strip.text.x = element_text(colour = "black"),
      strip.text.y = element_text(colour = "black"),
      panel.border = element_rect(fill = NA),
      legend.key.size = unit(0.9, "lines"),
      legend.key = element_rect(colour = NA, fill = NA),
      legend.background = element_rect(colour = NA, fill = NA)
    )
}
theme_set(theme_report())
#################
### FUNCTIONS ###
#################
# von Bertalanffy growth function: expected length at age.
#
# @param Linf Asymptotic length.
# @param k    Growth coefficient (per year).
# @param t0   Theoretical age at length zero.
# @param ages Age or vector of ages.
# @return Predicted length(s) at `ages`.
VBGF <- function(Linf, k, t0, ages) {
  unattained_fraction <- exp(-k * (ages - t0))
  Linf * (1 - unattained_fraction)
}
# Inverse von Bertalanffy: age at which an individual reaches length `lt`.
#
# @param Linf Asymptotic length.
# @param k    Growth coefficient.
# @param t0   Theoretical age at length zero.
# @param lt   Length (must be < Linf for a finite answer).
# @return Age corresponding to `lt`.
VBGF.age <- function(Linf, k, t0, lt) {
  fraction_remaining <- 1 - (lt / Linf)
  t0 - log(fraction_remaining) / k
}
# Run the Stock Synthesis executable found in `path`.
#
# @param path   Directory containing the SS model input files and executable.
# @param ss.cmd Extra command-line flags for SS (default skips the hessian).
# @param OS.in  One of "Windows", "Mac", "Linux"; selects how SS is invoked.
# NOTE(review): the Windows branch delegates to run() — presumably r4ss::run,
# given the r4ss dependency; confirm. The Mac/Linux branches chmod the bundled
# binary and invoke it via a chained shell command.
RUN.SS<-function(path,ss.cmd=" -nohess -nox",OS.in="Windows"){
navigate <- paste("cd ", path, sep="")
if(OS.in=="Windows")
{
#command <- paste0(navigate," & ", "ss", ss.cmd)
#shell(command, invisible=TRUE, translate=TRUE)
run(path,exe="ss",extras=ss.cmd,skipfinished=FALSE,show_in_console = TRUE)
}
if(OS.in=="Mac")
{
# cd into the model dir, mark the OSX binary executable, then run it.
command <- c(paste("cd", path), "chmod +x ./ss_osx",paste("./ss_osx", ss.cmd))
system(paste(command, collapse=";"),invisible=TRUE)
#command <- paste0(path,"/./ss_mac", ss.cmd)
#system(command, invisible=TRUE)
}
if(OS.in=="Linux")
{
command <- c(paste("cd", path), "chmod +x ./ss_linux",paste("./ss_linux", ss.cmd))
system(paste(command, collapse=";"), invisible=TRUE)
}
}
# Open a 300-dpi PNG graphics device at file.path(wd, file).
#
# @param wd   Output directory.
# @param file File name within `wd`.
# @param w,h  Width and height in inches.
# @param pt   Point size.
# The caller is responsible for closing the device with dev.off().
pngfun <- function(wd, file,w=7,h=7,pt=12){
file <- file.path(wd, file)
cat('writing PNG to',file,'\n')
png(filename=file,
width=w,height=h,
units='in',res=300,pointsize=pt)
}
# A subset of rich.colors by Arni Magnusson from the gregmisc package
# (a.k.a. rich.colors.short), inlined here to diagnose a transparency
# problem on one computer.
#
# @param n     Number of colours to generate.
# @param alpha Opacity in [0, 1] applied to every colour.
# @return Character vector of n hex colour strings ("#RRGGBBAA").
rc <- function(n,alpha=1){
x <- seq(0, 1, length = n)
r <- 1/(1 + exp(20 - 35 * x))
g <- pmin(pmax(0, -0.8 + 6 * x - 5 * x^2), 1)
b <- dnorm(x, 0.25, 0.15)/max(dnorm(x, 0.25, 0.15))
rgb.m <- matrix(c(r, g, b), ncol = 3)
# Fix: return the palette as a visible value. The original ended on an
# assignment (rich.vector <- apply(...)), which returns invisibly.
apply(rgb.m, 1, function(v) rgb(v[1], v[2], v[3], alpha=alpha))
}
# Double-normal selectivity curve (Stock Synthesis pattern 24) evaluated on
# integer length bins 1..(2*Selpeak).
#
# @param Sel50       Length at 50% selectivity on the ascending limb.
# @param Selpeak     Length at which selectivity first reaches its peak.
# @param PeakDesc    Length where the descending limb begins (peak width).
# @param LtPeakFinal Width parameter of the descending limb.
# @param FinalSel    Selectivity (0-1) at the last length bin.
# @return Two-column matrix cbind(x, sel): length bins and selectivity.
doubleNorm24.sel <- function(Sel50,Selpeak,PeakDesc,LtPeakFinal,FinalSel) {
#UPDATED: - input e and f on 0 to 1 scale and transform to logit scale
# - changed bin width in peak2 calculation
# - updated index of sel when j2 < length(x)
# - renamed input parameters, cannot have same names as the logistic function
# - function not handling f < -1000 correctly
# Length bins 1, 2, ..., 2*Selpeak (unit bin width).
x<-seq(1,Selpeak+Selpeak,1)
bin_width <- x[2] - x[1]
# Map the user-scale inputs onto the six SS pattern-24 parameters a-f.
a<- Selpeak
b<- -log((max(x)-Selpeak-bin_width)/(PeakDesc-Selpeak-bin_width))
c<- log(-((Sel50-Selpeak)^2/log(0.5)))
d<- log(LtPeakFinal)
# e is the "initial" (first-bin) parameter, fixed here; f is logit(FinalSel)
# with a small epsilon guarding FinalSel = 0.
e<- -15
f<- -log((1/(FinalSel+0.000000001)-1))
sel <- rep(NA, length(x))
startbin <- 1
peak <- a
upselex <- exp(c)
downselex <- exp(d)
final <- f
# Values of e/f below -1000 are SS sentinel codes selecting special handling.
if (e < -1000) {
j1 <- -1001 - round(e)
sel[1:j1] <- 1e-06
}
if (e >= -1000) {
j1 <- startbin - 1
if (e > -999) {
point1 <- 1/(1 + exp(-e))
t1min <- exp(-(x[startbin] - peak)^2/upselex)
}
}
if (f < -1000)
j2 <- -1000 - round(f)
if (f >= -1000)
j2 <- length(x)
# Location where the descending limb starts (top of the plateau).
peak2 <- peak + bin_width + (0.99 * x[j2] - peak - bin_width)/(1 +
exp(-b))
if (f > -999) {
point2 <- 1/(1 + exp(-final))
t2min <- exp(-(x[j2] - peak2)^2/downselex)
}
t1 <- x - peak
t2 <- x - peak2
# Steep logistic "joiner" functions that blend the ascending limb, the
# plateau, and the descending limb.
join1 <- 1/(1 + exp(-(20/(1 + abs(t1))) * t1))
join2 <- 1/(1 + exp(-(20/(1 + abs(t2))) * t2))
if (e > -999)
asc <- point1 + (1 - point1) * (exp(-t1^2/upselex) -
t1min)/(1 - t1min)
if (e <= -999)
asc <- exp(-t1^2/upselex)
if (f > -999)
dsc <- 1 + (point2 - 1) * (exp(-t2^2/downselex) -
1)/(t2min - 1)
if (f <= -999)
dsc <- exp(-(t2)^2/downselex)
# Blend ascending and descending limbs over the active bin range.
idx.seq <- (j1 + 1):j2
sel[idx.seq] <- asc[idx.seq] * (1 - join1[idx.seq]) + join1[idx.seq] * (1 -
join2[idx.seq] + dsc[idx.seq] * join2[idx.seq])
if (startbin > 1 && e >= -1000) {
sel[1:startbin] <- (x[1:startbin]/x[startbin])^2 *
sel[startbin]
}
# Carry the value at j2 flat to the end of the length range.
if (j2 < length(x))
sel[(j2 + 1):length(x)] <- sel[j2]
return(cbind(x,sel))
}
########## Clear data files and plots ############
# Reactive storage for each uploaded data set. `data` holds the parsed file;
# `clear` is set TRUE by the matching reset button and blocks the file
# observer from re-reading a stale upload.
rv.Lt <- reactiveValues(data = NULL,clear = FALSE)
rv.Age <- reactiveValues(data = NULL,clear = FALSE)
rv.Ct <- reactiveValues(data = NULL,clear = FALSE)
rv.Index <- reactiveValues(data = NULL,clear = FALSE)
rv.AgeErr <- reactiveValues(data = NULL,clear = FALSE)
########
#Reset catches
# Read the uploaded catch file into rv.Ct unless the user has pressed the
# reset button (rv.Ct$clear) since the last upload.
observe({
req(input$file2)
req(!rv.Ct$clear)
rv.Ct$data <- fread(input$file2$datapath,check.names=FALSE,data.table=FALSE)
#L <- readLines(input$file2$datapath, n = 1)
#if(grepl(";", L)) {rv.Ct$data <- read.csv2(input$file2$datapath,check.names=FALSE)}
})
# A fresh upload re-enables reading; high priority so it runs before the
# reader observer above.
observeEvent(input$file2, {
rv.Ct$clear <- FALSE
}, priority = 1000)
# Reset button: drop the data and block re-reads until a new upload.
observeEvent(input$reset_ct, {
rv.Ct$data <- NULL
rv.Ct$clear <- TRUE
reset('file2')
}, priority = 1000)
#Reset lengths
# Read the uploaded length-composition file into rv.Lt unless cleared.
observe({
req(input$file1)
req(!rv.Lt$clear)
rv.Lt$data <- fread(input$file1$datapath,check.names=FALSE,data.table=FALSE)
#L <- readLines(input$file1$datapath, n = 1)
#rv.Lt$data <- read.csv(input$file1$datapath,check.names=FALSE)
#if(grepl(";", L)) {rv.Lt$data <- read.csv2(input$file1$datapath,check.names=FALSE)}
})
# A fresh upload re-enables reading.
observeEvent(input$file1, {
rv.Lt$clear <- FALSE
}, priority = 1000)
# Reset button: drop the data and block re-reads until a new upload.
observeEvent(input$reset_lt, {
rv.Lt$data <- NULL
rv.Lt$clear <- TRUE
reset('file1')
}, priority = 1000)
#Reset ages
# Read the uploaded age-composition file into rv.Age unless cleared.
observe({
req(input$file3)
req(!rv.Age$clear)
rv.Age$data <- fread(input$file3$datapath,check.names=FALSE,data.table=FALSE)
#L <- readLines(input$file3$datapath, n = 1)
#if(grepl(";", L)) {rv.Age$data <- read.csv2(input$file3$datapath,check.names=FALSE)}
})
# A fresh upload re-enables reading.
observeEvent(input$file3, {
rv.Age$clear <- FALSE
}, priority = 1000)
# Reset button: drop the data and block re-reads until a new upload.
observeEvent(input$reset_age, {
rv.Age$data <- NULL
rv.Age$clear <- TRUE
reset('file3')
}, priority = 1000)
#Reset ageing error
# Read the uploaded ageing-error matrix (headerless) into rv.AgeErr unless cleared.
observe({
req(input$file33)
req(!rv.AgeErr$clear)
rv.AgeErr$data <- fread(input$file33$datapath,check.names=FALSE,header=FALSE,data.table=FALSE)
#L <- readLines(input$file33$datapath, n = 1)
#if(grepl(";", L)) {rv.AgeErr$data <- read.csv2(input$file33$datapath,check.names=FALSE,header=FALSE)}
})
# On upload: re-enable reading, but immediately discard the file when the
# ageing-error checkbox is off.
observeEvent(input$file33, {
rv.AgeErr$clear <- FALSE
if(!input$Ageing_error_choice){
rv.AgeErr$data <- NULL
rv.AgeErr$clear <- TRUE
reset('file33')}
}, priority = 1000)
# # if(!is.null(input$Ageing_error_choice)){
# observeEvent(input$file33, {
# if(!input$Ageing_error_choice){
# rv.AgeErr$data <- NULL
# rv.AgeErr$clear <- TRUE
# reset('file33') #}
# }, priority = 1000)
# }
#Reset index
# Read the uploaded abundance-index file into rv.Index unless cleared.
observe({
req(input$file4)
req(!rv.Index$clear)
rv.Index$data <- fread(input$file4$datapath,check.names=FALSE,data.table=FALSE)
#L <- readLines(input$file4$datapath, n = 1)
#rv.Index$data <- read.csv(input$file4$datapath,check.names=FALSE)
#if(grepl(";", L)) {rv.Index$data <- read.csv2(input$file4$datapath,check.names=FALSE,header=FALSE)}
})
# A fresh upload re-enables reading.
observeEvent(input$file4, {
rv.Index$clear <- FALSE
}, priority = 1000)
# Reset button: drop the data and block re-reads until a new upload.
observeEvent(input$reset_index, {
rv.Index$data <- NULL
rv.Index$clear <- TRUE
reset('file4')
}, priority = 1000)
#Throw an error if fleets are not consecutively represented in all loaded data sets.
# Pools fleet ids across catch/length/age/index inputs and warns when they do
# not form the consecutive sequence 1..max.
observeEvent(req(any(!is.null(rv.Ct$data),!is.null(rv.Lt$data),!is.null(rv.Age$data),!is.null(rv.Index$data))),{
ct.flt<-lt.flt<-age.flt<-index.flt<-NA
# Catch fleets are implied by column position; the other files carry an
# explicit fleet id in column 3.
if(!is.null(rv.Ct$data)){ct.flt<-c(1:(ncol(rv.Ct$data)))}
if(!is.null(rv.Lt$data)){lt.flt<-rv.Lt$data[,3]}
if(!is.null(rv.Age$data)){age.flt<-rv.Age$data[,3]}
if(!is.null(rv.Index$data)){index.flt<-rv.Index$data[,3]}
fleets.no.negs<-unique(na.omit(c(ct.flt,lt.flt,age.flt,index.flt)))[unique(na.omit(c(ct.flt,lt.flt,age.flt,index.flt)))>0] #remove any negative fleets
# length(seq(1:max(x))) is simply max(x): the test is "number of distinct
# fleet ids equals the largest fleet id", i.e. no gaps in 1..max.
if(length(fleets.no.negs)!=length(seq(1:max(fleets.no.negs))))
{
sendSweetAlert(
session = session,
title = "Model Warning",
text = "Non-consecutive fleet numbering. Check all data sets (e.g., catch, lengths, ages, indices) to make sure all fleets from 1 to the maximum fleet number are found when considered across all data sets. For instance, if you have 3 total fleets, there should not be a fleet number > 3 (e.g., 1,2,4). All fleets are not expected in each data file, just across all data files.",
type = "warning")
}
})
#######
# observeEvent(input$reset_lt, {
# rv.Lt$data <- NULL
# shinyjs::reset('file1')
# })
# # observeEvent(input$reset_lt, {
# # output$Ltplot<-renderPlot({
# # rv.Lt$data <- NULL
# # if (is.null(rv.Lt$data)) return(NULL)
# # })
# # })
# observeEvent(input$reset_age, {
# rv.Age$data <- NULL
# shinyjs::reset('file3')
# })
# observeEvent(input$reset_ct, {
# rv.Ct$data <- NULL
# shinyjs::reset('file2')
# })
#####################################################
# NOTE(review): this passes the string "est_LHparms" positionally and the
# element id via `id=`; check against shinyjs::onclick(id, expr) — as written
# the string appears to be the (no-op) expression. Confirm intent.
onclick("est_LHparms",id="panel_SS_est")
# Initial UI state: show the data-entry panel and hide the user-model tab ("11").
observe({
shinyjs::show("Data_panel")
hideTab(inputId = "tabs", target = "11")
#shinyjs::hide("OS_choice")
#shinyjs::hide("run_SS")
#shinyjs::hide("run_SSS")
})
#To get the ObserveEvent to work, each statement in req needs to be unique.
#This explains the workaround of ((as.numeric(input$tabs)*x)/x)<4, where x is the unique type of assessment being run
#This input allows other tabs to have different side panels.
# ---------------------------------------------------------------------------
# Side-panel visibility control. Each observeEvent below corresponds to one
# analysis mode; the original code repeated ~38 shinyjs::show/hide calls per
# mode, so the common machinery is factored into show_only_panels(). The req()
# expressions are kept verbatim (each must be a unique expression).
# ---------------------------------------------------------------------------
# Every side panel managed by the mode switchers.
panel_ids <- c(
  "Data_panel", "Existing_files", "panel_Ct_F_LO", "panel_data_wt_lt",
  "panel_ct_wt_LO", "panel_SSS", "panel_SSLO_LH", "panel_SSLO_fixed",
  "panel_SS_LH_fixed_est_tog", "panel_SS_LH_fixed", "panel_SS_fixed",
  "panel_SS_LH_est", "panel_SS_est", "panel_SS_stock_status",
  "panel_SSS_prod", "panel_SS_LO_prod", "panel_SS_prod_fixed",
  "panel_SS_prod_est", "panel_selectivity", "panel_selectivity_sss",
  "panel_SS_recdevs", "panel_SS_jitter", "panel_RPs", "panel_Forecasts",
  "panel_Mod_dims", "panel_advanced_SS", "panel_advanced_user_SS",
  "panel_advanced_SSS", "panel_SSS_reps", "OS_choice", "Scenario_panel",
  "run_SSS", "run_SS", "Modeff_panel", "Profile_panel", "Retro_panel",
  "Sensi_Comparison_panel", "Ensemble_panel"
)
# Panels shared by every full Stock Synthesis mode (SS-LO, SS-CL fixed/est).
ss_common_panels <- c(
  "panel_selectivity", "panel_SS_recdevs", "panel_SS_jitter", "panel_RPs",
  "panel_Forecasts", "panel_Mod_dims", "panel_advanced_SS", "OS_choice",
  "Scenario_panel", "run_SS"
)
# Show exactly `visible`, hide every other managed panel, then toggle the
# data tab ("2") versus the user-model tab ("11").
show_only_panels <- function(visible, data_tab = TRUE) {
  for (id in setdiff(panel_ids, visible)) shinyjs::hide(id)
  for (id in visible) shinyjs::show(id)
  if (data_tab) {
    hideTab(inputId = "tabs", target = "11")
    showTab(inputId = "tabs", target = "2")
  } else {
    showTab(inputId = "tabs", target = "11")
    hideTab(inputId = "tabs", target = "2")
  }
}
#Switch back to data from different tabs
observeEvent(req(((as.numeric(input$tabs)*99)/99)<4), {
  show_only_panels(c("Data_panel", "Existing_files"))
})
#Reset when all things are clicked off
observeEvent(req(((as.numeric(input$tabs)*1)/1)<4&is.null(rv.Lt$data)&is.null(rv.Ct$data)&is.null(rv.Age$data)&is.null(rv.Index$data)&any(is.null(input$user_model),!input$user_model)), {
  show_only_panels(c("Data_panel", "Existing_files"))
})
#User chosen model
observeEvent(req(!is.null(input$user_model)&input$user_model), {
  show_only_panels(c(
    "Data_panel", "Existing_files", "panel_data_wt_lt", "panel_SS_jitter",
    "panel_RPs", "panel_Forecasts", "panel_advanced_user_SS", "OS_choice",
    "Scenario_panel", "run_SS"
  ), data_tab = FALSE)
})
#SSS panels
observeEvent(req(((as.numeric(input$tabs)*1)/1)<4&is.null(rv.Lt$data)&!is.null(rv.Ct$data)&is.null(rv.Age$data)&is.null(rv.Index$data)&any(is.null(input$user_model),!input$user_model)), {
  show_only_panels(c(
    "Data_panel", "panel_SSS", "panel_SS_stock_status", "panel_SSS_prod",
    "panel_selectivity_sss", "panel_RPs", "panel_Forecasts", "panel_Mod_dims",
    "panel_SSS_reps", "panel_advanced_SSS", "OS_choice", "Scenario_panel",
    "run_SSS"
  ), data_tab = FALSE)
})
#SS-LO panels
observeEvent(req(((as.numeric(input$tabs)*2)/2)<4&all(!is.null(c(rv.Lt$data,rv.Age$data)),is.null(rv.Ct$data))&any(is.null(input$user_model),!input$user_model)), {
  show_only_panels(c(
    "Data_panel", "Existing_files", "panel_Ct_F_LO", "panel_data_wt_lt",
    "panel_SSLO_LH", "panel_SSLO_fixed", "panel_SS_LO_prod", ss_common_panels
  ))
  # Catch-weighting panel only applies with multiple fleets; when both
  # conditions hold the hide wins, matching the original statement order.
  if(length(unique(rv.Lt$data[,3]))>1|length(unique(rv.Age$data[,3]))>1){shinyjs::show("panel_ct_wt_LO")}
  if(length(unique(rv.Lt$data[,3]))==1|length(unique(rv.Age$data[,3]))==1){shinyjs::hide("panel_ct_wt_LO")}
})
#SS-CL fixed parameters
observeEvent(req(((as.numeric(input$tabs)*3)/3)<4&all(any(input$est_parms==FALSE,input$est_parms2==FALSE),any(all(!is.null(rv.Lt$data),!is.null(rv.Ct$data)),all(!is.null(rv.Age$data),!is.null(rv.Ct$data)),all(!is.null(rv.Index$data),!is.null(rv.Ct$data))))&any(is.null(input$user_model),!input$user_model)), {
  show_only_panels(c(
    "Data_panel", "Existing_files", "panel_SS_LH_fixed_est_tog",
    "panel_SS_LH_fixed", "panel_SS_fixed", "panel_SS_prod_fixed",
    ss_common_panels
  ))
  # Data-weighting panel only when composition data are present.
  if(any(!is.null(rv.Lt$data),!is.null(rv.Age$data))){shinyjs::show("panel_data_wt_lt")}
})
#SS-CL with parameter estimates
observeEvent(req(((as.numeric(input$tabs)*4)/4)<4&all(input$est_parms==TRUE,any(all(!is.null(rv.Lt$data),!is.null(rv.Ct$data)),all(!is.null(rv.Age$data),!is.null(rv.Ct$data)),all(!is.null(rv.Index$data),!is.null(rv.Ct$data))))&any(is.null(input$user_model),!input$user_model)), {
  show_only_panels(c(
    "Data_panel", "Existing_files", "panel_SS_LH_fixed_est_tog",
    "panel_SS_LH_est", "panel_SS_est", "panel_SS_prod_est", ss_common_panels
  ))
  # Data-weighting panel only when composition data are present.
  if(any(!is.null(rv.Lt$data),!is.null(rv.Age$data))){shinyjs::show("panel_data_wt_lt")}
})
#Model Efficiency
observeEvent(req((as.numeric(input$tabs)*12/12)==12), {
  show_only_panels("Modeff_panel")
})
#Profiles
observeEvent(req((as.numeric(input$tabs)*4/4)==4), {
  show_only_panels("Profile_panel")
})
#Retrospectives
observeEvent(req((as.numeric(input$tabs)*5/5)==5), {
  show_only_panels("Retro_panel")
})
#Sensitivities
observeEvent(req((as.numeric(input$tabs)*6/6)==6), {
  show_only_panels("Sensi_Comparison_panel")
})
#Ensembles
observeEvent(req((as.numeric(input$tabs)*7/7)==7), {
  show_only_panels("Ensemble_panel")
})
########################################
#############################
######### UI INPUTS #########
#############################
# User activated pop-up parameter values ---------------
#Model dimensions
# Start/end model-year inputs derived from whichever data files are loaded.
output$Model_dims1 <- renderUI({
  lt.data  <- rv.Lt$data
  ct.data  <- rv.Ct$data
  age.data <- rv.Age$data
  #No file inputs
  if (is.null(lt.data) & is.null(ct.data) & is.null(age.data)) return(NULL)
  # NULL-safe year (first column) extractor. Fix: the original evaluated
  # inFile[,1] on NULL inputs (e.g. ages loaded without lengths), which
  # errors with "incorrect number of dimensions"; min()/max() simply ignore
  # a NULL argument.
  years_of <- function(d) if (is.null(d)) NULL else d[[1]]
  #If have lengths and/or ages, but no catches
  if (any(!is.null(lt.data), !is.null(age.data)) & is.null(ct.data)) {
    styr.in  <- min(years_of(lt.data), years_of(age.data))
    endyr.in <- max(years_of(lt.data), years_of(age.data))
  }
  #If have catches
  if (!is.null(ct.data)) {
    styr.in  <- min(years_of(ct.data))
    endyr.in <- max(years_of(ct.data))
  }
  #If lengths or ages with catches: span all loaded year columns
  if (!is.null(lt.data) & !is.null(ct.data) | !is.null(age.data) & !is.null(ct.data)) {
    styr.in  <- min(years_of(lt.data), years_of(ct.data), years_of(age.data))
    endyr.in <- max(years_of(lt.data), years_of(ct.data), years_of(age.data))
  }
  fluidRow(column(width=4, numericInput("styr", "Starting year",
           value=styr.in, min=1, max=10000, step=1)),
           column(width=4, numericInput("endyr","Ending year",
           value=endyr.in, min=1, max=10000, step=1)))
})
# Start/end model-year inputs driven by the catch file alone; renders nothing
# until catches are loaded.
output$Model_dims2 <- renderUI({
  ct.data <- rv.Ct$data
  if (is.null(ct.data)) return(NULL)
  yrs <- ct.data[,1]
  fluidRow(
    column(width=4, numericInput("styr", "Starting year",
           value=min(yrs), min=1, max=10000, step=1)),
    column(width=4, numericInput("endyr", "Ending year",
           value=max(yrs), min=1, max=10000, step=1))
  )
})
# output$Female_parms_inputs_label <- reactive({
# if(!is.null(input$file1))
# {
# (output$Female_parms_inputs_label<- renderUI({
# fluidRow(column(width=6,numericInput("Nages","Max. age", value=NA,min=1, max=1000, step=1)),
# column(width=6,numericInput("M_f", "Natural mortality", value=NA,min=0, max=10000, step=0.01)))
# }))
# }
# })
#Male life history parameters
output$Male_parms_inputs_label <- renderUI({
  #Section header shown only when male-specific parameters are enabled.
  if (input$male_parms) h5(em("Male"))
})
output$Male_parms_inputs1 <- renderUI({
  #Male natural mortality and asymptotic length.
  if (input$male_parms) {
    fluidRow(
      column(width = 6, numericInput(inputId = "M_m", label = "Natural mortality",
        value = NA, min = 0, max = 10000, step = 0.01)),
      column(width = 6, numericInput(inputId = "Linf_m", label = "Asymptotic size (Linf)",
        value = NA, min = 0, max = 10000, step = 0.01))
    )
  }
})
output$Male_parms_inputs2 <- renderUI({
  #Male von Bertalanffy growth parameters.
  if (input$male_parms) {
    fluidRow(
      column(width = 6, numericInput(inputId = "k_m", label = "Growth coefficient k",
        value = NA, min = 0, max = 10000, step = 0.01)),
      column(width = 6, numericInput(inputId = "t0_m", label = "Age at length 0 (t0)",
        value = NA, min = 0, max = 10000, step = 0.01))
    )
  }
})
output$Male_parms_inputs3 <- renderUI({
  #Comma-separated CVs of length at age: young fish first, then old.
  if (input$male_parms) {
    fluidRow(column(width = 6, textInput(inputId = "CV_lt_m",
      label = "CV at length (young then old)", value = "0.1,0.1")))
  }
})
output$Male_parms_inputs4 <- renderUI({
  #Weight-length relationship parameters (W = a * L^b).
  if (input$male_parms) {
    fluidRow(
      column(width = 6, numericInput(inputId = "WLa_m", label = "Weight-length alpha",
        value = 0.00001, min = 0, max = 10000, step = 0.000000001)),
      column(width = 6, numericInput(inputId = "WLb_m", label = "Weight-length beta",
        value = 3, min = 0, max = 10000, step = 0.01))
    )
  }
})
output$Male_parms_inputs_label_fix <- renderUI({
  #Section header for the fixed-parameter male panel.
  if (input$male_parms_fix) h5(em("Male"))
})
output$Male_parms_inputs1_fix <- renderUI({
  #Male natural mortality and asymptotic length (fixed-parameter mode).
  if (input$male_parms_fix) {
    fluidRow(
      column(width = 6, numericInput(inputId = "M_m_fix", label = "Natural mortality",
        value = NA, min = 0, max = 10000, step = 0.01)),
      column(width = 6, numericInput(inputId = "Linf_m_fix", label = "Asymptotic size (Linf)",
        value = NA, min = 0, max = 10000, step = 0.01))
    )
  }
})
output$Male_parms_inputs2_fix <- renderUI({
  #Male growth parameters (fixed-parameter mode).
  if (input$male_parms_fix) {
    fluidRow(
      column(width = 6, numericInput(inputId = "k_m_fix", label = "Growth coefficient k",
        value = NA, min = 0, max = 10000, step = 0.01)),
      column(width = 6, numericInput(inputId = "t0_m_fix", label = "Age at length 0 (t0)",
        value = NA, min = 0, max = 10000, step = 0.01))
    )
  }
})
output$Male_parms_inputs3_fix <- renderUI({
  #Comma-separated length-at-age CVs (young, old) in fixed mode.
  if (input$male_parms_fix) {
    fluidRow(column(width = 6, textInput(inputId = "CV_lt_m_fix",
      label = "CV at length (young then old)", value = "0.1,0.1")))
  }
})
output$Male_parms_inputs4_fix <- renderUI({
  #Weight-length parameters (fixed mode).
  if (input$male_parms_fix) {
    fluidRow(
      column(width = 6, numericInput(inputId = "WLa_m_fix", label = "Weight-Length alpha",
        value = 0.00001, min = 0, max = 10000, step = 0.000000001)),
      column(width = 6, numericInput(inputId = "WLb_m_fix", label = "Weight-length beta",
        value = 3, min = 0, max = 10000, step = 0.01))
    )
  }
})
output$Male_parms_inputs_label_est <- renderUI({
  #Section header for the estimation-mode male panel (h4 here, h5 elsewhere).
  if (input$male_parms_est) h4(em("Male"))
})
output$Male_parms_inputs_M_est <- renderUI({
  #Prior specification (type, mean, SD, estimation phase) for male M.
  if (input$male_parms_est) {
    dropdownButton(
      label = "Natural mortality", icon = icon("skull-crossbones"),
      status = "danger", circle = FALSE, right = TRUE, width = "300px",
      selectInput("M_m_prior", "Prior type",
        c("no prior", "symmetric beta", "beta", "lognormal", "gamma", "normal")),
      numericInput("M_m_mean", "Mean", value = NA, min = 0, max = 10000, step = 0.001),
      numericInput("M_m_SD", "SD", value = 0, min = 0, max = 10000, step = 0.001),
      numericInput("M_m_phase", "Phase", value = -1, min = -999, max = 10, step = 0.001)
    )
  }
})
#Layout spacers shown only when male estimation parameters are enabled.
output$Male_parms_inputs_space1 <- renderUI({
  if (input$male_parms_est) br()
})
output$Male_parms_inputs_space2 <- renderUI({
  if (input$male_parms_est) br()
})
output$Male_parms_inputs_space3 <- renderUI({
  if (input$male_parms_est) br()
})
output$Male_parms_inputs_space4 <- renderUI({
  if (input$male_parms_est) br()
})
output$Male_parms_inputs_space5 <- renderUI({
  if (input$male_parms_est) br()
})
output$Male_parms_inputs_Growth_label <- renderUI({
  #Sub-header for the growth-parameter dropdowns below.
  if (input$male_parms_est) h5(strong("Growth"))
})
output$Male_parms_inputs_Linf_est <- renderUI({
  #Prior dropdown for male asymptotic length (Linf).
  if (input$male_parms_est) {
    dropdownButton(
      label = "Linf: Asymptotic size", icon = icon("infinity"),
      status = "danger", circle = FALSE, right = TRUE, width = "300px",
      selectInput("Linf_m_prior", "Prior type",
        c("no prior", "symmetric beta", "beta", "lognormal", "gamma", "normal")),
      numericInput("Linf_m_mean", "Mean", value = NA, min = 0, max = 10000, step = 0.001),
      numericInput("Linf_m_SD", "SD", value = 0, min = 0, max = 10000, step = 0.001),
      numericInput("Linf_m_phase", "Phase", value = -1, min = -999, max = 10, step = 0.001)
    )
  }
})
output$Male_parms_inputs_k_est <- renderUI({
  #Prior dropdown for the male von Bertalanffy growth coefficient k.
  if (input$male_parms_est) {
    dropdownButton(
      label = "k: VB growth coefficient", icon = icon("ruler-horizontal"),
      status = "danger", circle = FALSE, right = TRUE, width = "300px",
      selectInput("k_m_prior", "Prior type",
        c("no prior", "symmetric beta", "beta", "lognormal", "gamma", "normal")),
      numericInput("k_m_mean", "Mean", value = NA, min = 0, max = 10000, step = 0.001),
      numericInput("k_m_SD", "SD", value = 0, min = 0, max = 10000, step = 0.001),
      numericInput("k_m_phase", "Phase", value = -1, min = -999, max = 10, step = 0.001)
    )
  }
})
output$Male_parms_inputs_t0_est <- renderUI({
  #Prior dropdown for male t0 (age at length zero); mean may be negative.
  if (input$male_parms_est) {
    dropdownButton(
      label = "t0: Age at size 0", icon = icon("baby-carriage"),
      status = "danger", circle = FALSE, right = TRUE, width = "300px",
      selectInput("t0_m_prior", "Prior type",
        c("no prior", "symmetric beta", "beta", "lognormal", "gamma", "normal")),
      numericInput("t0_m_mean", "Mean", value = 0, min = -100, max = 100, step = 0.001),
      numericInput("t0_m_SD", "SD", value = 0, min = 0, max = 100, step = 0.001),
      numericInput("t0_m_phase", "Phase", value = -1, min = -999, max = 100, step = 0.001)
    )
  }
})
output$Male_parms_inputs_CV_est_young <- renderUI({
  #Prior dropdown for length-at-age CV of young males.
  if (input$male_parms_est) {
    dropdownButton(
      label = "CV at length (young)", icon = icon("dice"),
      status = "danger", circle = FALSE, right = TRUE, width = "300px",
      selectInput("CV_lt_m_young_prior", "Prior type",
        c("no prior", "symmetric beta", "beta", "lognormal", "gamma", "normal")),
      numericInput("CV_lt_m_young_mean", "Mean", value = 0.1, min = 0, max = 10000, step = 0.001),
      numericInput("CV_lt_m_young_SD", "SD", value = 0, min = 0, max = 10000, step = 0.001),
      numericInput("CV_lt_m_young_phase", "Phase", value = -1, min = -999, max = 10, step = 0.001)
    )
  }
})
output$Male_parms_inputs_CV_est_old <- renderUI({
  #Prior dropdown for length-at-age CV of old males.
  if (input$male_parms_est) {
    dropdownButton(
      label = "CV at length (old)", icon = icon("dice"),
      status = "danger", circle = FALSE, right = TRUE, width = "300px",
      selectInput("CV_lt_m_old_prior", "Prior type",
        c("no prior", "symmetric beta", "beta", "lognormal", "gamma", "normal")),
      numericInput("CV_lt_m_old_mean", "Mean", value = 0.1, min = 0, max = 10000, step = 0.001),
      numericInput("CV_lt_m_old_SD", "SD", value = 0, min = 0, max = 10000, step = 0.001),
      numericInput("CV_lt_m_old_phase", "Phase", value = -1, min = -999, max = 10, step = 0.001)
    )
  }
})
output$Male_parms_inputs_WL_est <- renderUI({
  #Weight-length parameters (W = a * L^b) for males in estimation mode.
  if (input$male_parms_est) {
    fluidRow(
      column(width = 6, numericInput(inputId = "WLa_m_est", label = "Weight-length alpha",
        value = 0.00001, min = 0, max = 10000, step = 0.000000001)),
      column(width = 6, numericInput(inputId = "WLb_m_est", label = "Weight-length beta",
        value = 3, min = 0, max = 10000, step = 0.01))
    )
  }
})
#h5(strong("M")),
# fluidRow(column(width=4,style='padding:1px;',align="center", selectInput("M_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal"))),
# column(width=3,style='padding:2px;',align="center",numericInput("M_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',align="center",numericInput("M_m_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("M_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001)))
# }
# })
# output$Male_parms_inputs_Linf_est <- renderUI({
# if(input$male_parms_est){
# #h5(strong("Linf")),
# fluidRow(column(width=4,style='padding:1px;',align="center",selectInput("Linf_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal"))),
# column(width=3,style='padding:2px;',align="center",numericInput("Linf_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',align="center",numericInput("Linf_m_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("Linf_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001)))
# }
# })
# output$Male_parms_inputs_k_est <- renderUI({
# if(input$male_parms_est){
# #h5(strong("k")),
# fluidRow(column(width=4,style='padding:2px;',selectInput("k_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal"))),
# column(width=3,style='padding:2px;',numericInput("k_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',numericInput("k_m_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("k_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001)))
# }
# })
# output$Male_parms_inputs_t0_est <- renderUI({
# if(input$male_parms_est){
# #h5(strong("t0")),
# fluidRow(column(width=4,style='padding:2px;',selectInput("t0_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal"))),
# column(width=3,style='padding:2px;',numericInput("t0_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',numericInput("t0_m_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("t0_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001)))
# }
# })
# output$Male_parms_inputs_CV_est <- renderUI({
# if(input$male_parms_est){
# #h5(strong("Length CV")),
# fluidRow(column(width=4,style='padding:2px;',selectInput("CV_lt_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal"))),
# column(width=3,style='padding:2px;',numericInput("CV_lt_m_mean", "Mean", value=0.1,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',numericInput("CV_lt_m_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("CV_lt_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001)))
# }
# })
#Male life history parameters
output$Male_parms_inputs_label_SSS<- renderUI({
  #Section header for the SSS-mode male panel.
  if (input$male_parms_SSS) h5(em("Male"))
})
output$Male_parms_inputs_M_SSS<- renderUI({
  #Male M prior for SSS: no estimation phase; SD defaults to 0.44.
  if (input$male_parms_SSS) {
    dropdownButton(
      label = "Natural mortality", icon = icon("skull-crossbones"),
      status = "danger", circle = FALSE, right = TRUE, width = "300px",
      selectInput("M_m_prior_sss", "Prior type",
        c("lognormal", "normal", "uniform", "no prior")),
      numericInput("M_m_mean_sss", "Mean", value = NA, min = 0, max = 10000, step = 0.001),
      numericInput("M_m_SD_sss", "SD", value = 0.44, min = 0, max = 10000, step = 0.001)
    )
  }
})
#Layout spacers for the SSS male panel.
output$Male_parms_inputs_space1_SSS <- renderUI({
  if (input$male_parms_SSS) br()
})
output$Male_parms_inputs_space2_SSS <- renderUI({
  if (input$male_parms_SSS) br()
})
output$Male_parms_inputs_space3_SSS <- renderUI({
  if (input$male_parms_SSS) br()
})
output$Male_parms_inputs_space4_SSS <- renderUI({
  if (input$male_parms_SSS) br()
})
output$Male_parms_inputs_space5_SSS <- renderUI({
  if (input$male_parms_SSS) br()
})
output$Male_parms_inputs_Growth_label_SSS <- renderUI({
  #Sub-header for the SSS growth dropdowns below.
  if (input$male_parms_SSS) h5(strong("Growth"))
})
output$Male_parms_inputs_Linf_SSS <- renderUI({
  #Prior dropdown for male Linf in SSS mode (no phase input).
  if (input$male_parms_SSS) {
    dropdownButton(
      label = "Linf: Asymptotic size", icon = icon("infinity"),
      status = "danger", circle = FALSE, right = TRUE, width = "300px",
      selectInput("Linf_m_prior_sss", "Prior type", c("no prior", "normal")),
      numericInput("Linf_m_mean_sss", "Mean", value = NA, min = 0, max = 10000, step = 0.001),
      numericInput("Linf_m_SD_sss", "SD", value = 0, min = 0, max = 10000, step = 0.001)
    )
  }
})
output$Male_parms_inputs_k_SSS <- renderUI({
  #Prior dropdown for male growth coefficient k in SSS mode.
  if (input$male_parms_SSS) {
    dropdownButton(
      label = "k: VB growth coefficient", icon = icon("ruler-horizontal"),
      status = "danger", circle = FALSE, right = TRUE, width = "300px",
      selectInput("k_m_prior_sss", "Prior type", c("no prior", "normal")),
      numericInput("k_m_mean_sss", "Mean", value = NA, min = 0, max = 10000, step = 0.001),
      numericInput("k_m_SD_sss", "SD", value = 0, min = 0, max = 10000, step = 0.001)
    )
  }
})
output$Male_parms_inputs_t0_SSS <- renderUI({
  #Prior dropdown for male t0 in SSS mode; mean may be negative.
  if (input$male_parms_SSS) {
    dropdownButton(
      label = "t0: Age at size 0", icon = icon("baby-carriage"),
      status = "danger", circle = FALSE, right = TRUE, width = "300px",
      selectInput("t0_m_prior_sss", "Prior type", c("no prior", "normal")),
      numericInput("t0_m_mean_sss", "Mean", value = 0, min = -100, max = 100, step = 0.001),
      numericInput("t0_m_SD_sss", "SD", value = 0, min = 0, max = 1000, step = 0.001)
    )
  }
})
output$Male_parms_inputs_CV_young_SSS <- renderUI({
  #Length-at-age CV for young males (SSS mode; no prior types offered).
  if (input$male_parms_SSS) {
    dropdownButton(
      label = "CV at length", icon = icon("dice"),
      status = "danger", circle = FALSE, right = TRUE, width = "300px",
      selectInput("CV_lt_m_young_prior_sss", "Prior type", c("no prior")),
      numericInput("CV_lt_m_young_mean_sss", "Mean", value = 0.1, min = 0, max = 10000, step = 0.001),
      numericInput("CV_lt_m_young_SD_sss", "SD", value = 0, min = 0, max = 10000, step = 0.001)
    )
  }
})
output$Male_parms_inputs_CV_old_SSS <- renderUI({
  #Length-at-age CV for old males (SSS mode).
  if (input$male_parms_SSS) {
    dropdownButton(
      label = "CV at length", icon = icon("dice"),
      status = "danger", circle = FALSE, right = TRUE, width = "300px",
      selectInput("CV_lt_m_old_prior_sss", "Prior type", c("no prior")),
      numericInput("CV_lt_m_old_mean_sss", "Mean", value = 0.1, min = 0, max = 10000, step = 0.001),
      numericInput("CV_lt_m_old_SD_sss", "SD", value = 0, min = 0, max = 10000, step = 0.001)
    )
  }
})
output$Male_parms_inputs_WL_SSS<- renderUI({
  #Weight-length parameters (W = a * L^b) for males in SSS mode.
  if (input$male_parms_SSS) {
    fluidRow(
      column(width = 6, numericInput(inputId = "WLa_m_sss", label = "Weight-Length alpha",
        value = 0.00001, min = 0, max = 10000, step = 0.000000001)),
      column(width = 6, numericInput(inputId = "WLb_m_sss", label = "Weight-length beta",
        value = 3, min = 0, max = 10000, step = 0.01))
    )
  }
})
#Selectivity parameters
output$Sel_parms1 <- renderUI({
  #Length at 50% selectivity plus its estimation phase.
  fluidRow(column(width=8, textInput("Sel50", "Length at 50% Selectivity",value="")),
           column(width=4, textInput("Sel50_phase", "Est. phase", value="")))
})
output$Sel_parms2<- renderUI({
  #Typo fix in label: "Selectvity" -> "Selectivity".
  fluidRow(column(width=8, textInput("Selpeak", "Length at Peak Selectivity", value="")),
           column(width=4, textInput("Selpeak_phase", "Est. phase", value="")))
})
output$Sel_parms3 <- renderUI({
  #Descending-limb parameters apply only to dome-shaped selectivity.
  if(input$Sel_choice=="Dome-shaped"){
    fluidRow(column(width=8, textInput("PeakDesc", "Length at 1st declining selectivity",value="10000")),
             column(width=4, textInput("PeakDesc_phase", "Est. phase",value="")))
  }
})
output$Sel_parms4 <- renderUI({
  if(input$Sel_choice=="Dome-shaped"){
    fluidRow(column(width=8, textInput("LtPeakFinal", "Width of declining selectivity",value="0.0001")),
             column(width=4, textInput("LtPeakFinal_phase", "Est. phase",value="")))
  }
})
output$Sel_parms5 <- renderUI({
  if(input$Sel_choice=="Dome-shaped"){
    fluidRow(column(width=8, textInput("FinalSel", "Selectivity at max bin size",value="0.99999")),
             column(width=4, textInput("FinalSel_phase", "Est. phase",value="")))
  }
})
output$Sel_parms1_sss <- renderUI({
  #SSS variant: both selectivity lengths on one row, no phase inputs.
  #Typo fix in label: "Selectvity" -> "Selectivity".
  fluidRow(column(width=6, textInput("Sel50_sss", "Length at 50% Selectivity",value="")),
           column(width=6, textInput("Selpeak_sss", "Length at Peak Selectivity", value="")))
})
output$Sel_parms2_sss <- renderUI({
  if(input$Sel_choice_sss=="Dome-shaped"){
    fluidRow(column(width=6, textInput("PeakDesc_sss", "Length at 1st declining selectivity",value="10000")),
             column(width=6, textInput("LtPeakFinal_sss", "Width of declining selectivity",value="0.0001")))
  }
})
output$Sel_parms3_sss <- renderUI({
  if(input$Sel_choice_sss=="Dome-shaped"){
    fluidRow(column(width=8, textInput("FinalSel_sss", "Selectivity at max bin size",value="0.99999")))
  }
})
#Recruitment parameter inputs
output$Rec_options1 <- renderUI({
  #Recruitment variability (sigma-R). Typo fix in label:
  #"varaibility" -> "variability".
  if(input$rec_choice){
    fluidRow(column(width=6, numericInput("sigmaR", "Rec. variability (sR)",
                                          value=0.5, min=0, max=10, step=0.01)))
  }
})
output$Rec_options2 <- renderUI({
  #Recruitment-deviation year range, defaulting to the model year range.
  if(input$rec_choice){
    fluidRow(column(width=6, numericInput("Rdev_startyr", "Rec. devs. start year",
                                          value=input$styr, min=1, max=10000, step=1)),
             column(width=6, numericInput("Rdev_endyr", "Rec. devs. end year",
                                          value=input$endyr, min=1, max=10000, step=1)))
  }
})
output$Rec_options3 <- renderUI({
  #Bias-correction ramp: boundary years without bias correction.
  if(input$biasC_choice){
    fluidRow(column(width=6, numericInput("NobiasC_early", "Early last year",
                                          value=input$styr, min=1, max=10000, step=1)),
             column(width=6, numericInput("NobiasC_recent", "1st recent year",
                                          value=input$endyr, min=1, max=10000, step=1)))
  }
})
output$Rec_options4 <- renderUI({
  #Years over which full bias correction applies.
  if(input$biasC_choice){
    fluidRow(column(width=6, numericInput("BiasC_startyr", "Start year",
                                          value=input$styr, min=1, max=10000, step=1)),
             column(width=6, numericInput("BiasC_endyr", "End year",
                                          value=input$endyr, min=1, max=10000, step=1)))
  }
})
output$Rec_options5 <- renderUI({
  if(input$biasC_choice){
    fluidRow(column(width=6, numericInput("BiasC","Maximum bias adjustment", value=1,min=0, max=1, step=0.001)))
  }
})
output$Rec_options6 <- renderUI({
  #NOTE(review): the leading "N:" of the selected choice string is
  #presumably parsed downstream; choice strings left byte-identical.
  if(input$rec_choice){
    fluidRow(column(width=6, selectInput("RecDevChoice","Recruit deviation option",c("1: Devs sum to zero","2: Simple deviations","3: deviation vector","4: option 3 plus penalties"),selected="1: Devs sum to zero")))
  }
})
#Jitter value
output$Jitter_value <- renderUI({
  #Jitter fraction and number of jittered runs.
  if(input$jitter_choice){
    fluidRow(column(width=6, numericInput("jitter_fraction", "Jitter value",
                                          value=0.01, min=0, max=10, step=0.001)),
             #NOTE(review): default value=0 is below min=1 -- confirm whether
             #the default should be 1 or min should be 0.
             column(width=6, numericInput("Njitter", "# of jitters",
                                          value=0, min=1, max=10000, step=1)))
  }
})
#Choose reference points
output$RP_selection1<- renderUI({
  #SPR and relative-biomass targets.
  if (input$RP_choices) {
    fluidRow(
      column(width = 6, numericInput(inputId = "SPR_target", label = "SPR target",
        value = 0.5, min = 0, max = 1, step = 0.001)),
      column(width = 6, numericInput(inputId = "B_target", label = "Biomass target",
        value = 0.4, min = 0, max = 1, step = 0.001))
    )
  }
})
output$RP_selection2<- renderUI({
  #Harvest control rule: rule type plus upper/lower ratio breakpoints.
  if (input$RP_choices) {
    fluidRow(
      column(width = 6, selectInput(inputId = "CR_Ct_F", label = "Control rule type",
        choices = c("1: Catch fxn of SSB, buffer on F",
                    "2: F fxn of SSB, buffer on F",
                    "3: Catch fxn of SSB, buffer on catch",
                    "4: F fxn of SSB, buffer on catch"))),
      column(width = 3, numericInput(inputId = "slope_hi", label = "Upper ratio value",
        value = 0.4, min = 0, max = 1, step = 0.001)),
      column(width = 3, numericInput(inputId = "slope_low", label = "Lower ratio value",
        value = 0.1, min = 0, max = 1, step = 0.001))
    )
  }
})
output$Forecasts<- renderUI({
  #Forecast horizon and control-rule buffer.
  if (input$Forecast_choice) {
    fluidRow(
      column(width = 6, numericInput(inputId = "forecast_num", label = "# of forecast years",
        value = 2, min = 1, max = 1000, step = 1)),
      column(width = 6, textInput(inputId = "forecast_buffer", label = "Control rule buffer", value = "1"))
    )
  }
})
output$AdvancedSS_nohess<- renderUI({
  #Checkbox: skip the Hessian (no variance estimates, faster runs).
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "no_hess", label = "Turn off Hessian (speeds up runs, but no variance estimation)",
    shape = "round", outline = TRUE, status = "info")))
})
output$AdvancedSS_nohess_user<- renderUI({
  #NOTE(review): duplicates inputId "no_hess" from AdvancedSS_nohess; Shiny
  #input IDs must be unique on a rendered page -- confirm the two panels are
  #never shown together.
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "no_hess", label = "Turn off Hessian (speeds up runs, but no variance estimation)",
    shape = "round", outline = TRUE, status = "info")))
})
output$AdvancedSS_addcomms<- renderUI({
  #Checkbox toggling the free-text field for extra SS run commands.
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "add_comms",
    label = "Add additional SS run commands",
    shape = "round", outline = TRUE, status = "info")))
})
output$AdvancedSS_addcomms_comms <- renderUI({
  if(!is.null(input$add_comms)){
    if(input$add_comms){
      fluidRow(column(width=12, textInput("add_comms_in", "Enter additional run commands", value="")))
    }
  }
})
output$AdvancedSS_addcomms_user<- renderUI({
  #NOTE(review): also registers inputId "add_comms" (duplicate of
  #AdvancedSS_addcomms above).
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "add_comms",
    label = "Add additional SS run commands",
    shape = "round", outline = TRUE, status = "info")))
})
output$AdvancedSS_addcomms_comms_user <- renderUI({
  #NOTE(review): this checks input$add_comms_user, but no control with that
  #ID is created anywhere in view (the "_user" checkbox above uses
  #"add_comms"), so this UI can never render -- confirm the intended ID
  #before changing behavior.
  if(!is.null(input$add_comms_user)){
    if(input$add_comms_user){
      fluidRow(column(width=12, textInput("add_comms_in", "Enter additional run commands", value="")))
    }
  }
})
output$AdvancedSS_noplots<- renderUI({
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "no_plots_tables", label = "Turn off plots",
    shape = "round", outline = TRUE, status = "info")))
})
output$AdvancedSS_noplots_user<- renderUI({
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "no_plots_tables", label = "Turn off plots",
    shape = "round", outline = TRUE, status = "info")))
})
output$AdvancedSS_noestabs<- renderUI({
  #Typo fix in label: "exectutive" -> "executive".
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "no_tables", label = "No executive summary tables",
    shape = "round", outline = TRUE, status = "info")))
})
output$AdvancedSS_noestabs_user<- renderUI({
  #Typo fix in label: "exectutive" -> "executive".
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "no_tables", label = "No executive summary tables",
    shape = "round", outline = TRUE, status = "info")))
})
output$AdvancedSS_par<- renderUI({
  #Checkbox: restart SS from the parameter (.par) file of a previous run.
  fluidRow(column(width = 6, prettyCheckbox(
    inputId = "use_par",
    label = "Use par file (i.e., parameter file from previous run)?",
    status = "info", shape = "round", outline = TRUE)))
})
output$AdvancedSS_par_user<- renderUI({
  #Same checkbox for the user-file workflow.
  fluidRow(column(width = 6, prettyCheckbox(
    inputId = "use_par",
    label = "Use par file (i.e., parameter file from previous run)?",
    status = "info", shape = "round", outline = TRUE)))
})
output$AdvancedSS_phase0<- renderUI({
  #Checkbox: set all estimation phases to 0 (no estimation).
  fluidRow(column(width = 6, prettyCheckbox(
    inputId = "use_phase0",
    label = "Turn off estimation of all parameters (phase = 0)?",
    status = "info", shape = "round", outline = TRUE)))
})
output$AdvancedSS_phase0_user<- renderUI({
  fluidRow(column(width = 6, prettyCheckbox(
    inputId = "use_phase0",
    label = "Turn off estimation of all parameters (phase = 0)?",
    status = "info", shape = "round", outline = TRUE)))
})
output$AdvancedSS_datanew<- renderUI({
  #Checkbox: rerun from SS's echoed data file.
  fluidRow(column(width = 6, prettyCheckbox(
    inputId = "use_datanew",
    label = "Use the data_echo.ss_new file?",
    status = "info", shape = "round", outline = TRUE)))
})
output$AdvancedSS_datanew_user<- renderUI({
  fluidRow(column(width = 6, prettyCheckbox(
    inputId = "use_datanew",
    label = "Use the data_echo.ss_new file?",
    status = "info", shape = "round", outline = TRUE)))
})
output$AdvancedSS_controlnew<- renderUI({
  #Checkbox: rerun from SS's regenerated control file.
  fluidRow(column(width = 6, prettyCheckbox(
    inputId = "use_controlnew",
    label = "Use the control.ss_new file?",
    status = "info", shape = "round", outline = TRUE)))
})
output$AdvancedSS_controlnew_user<- renderUI({
  fluidRow(column(width = 6, prettyCheckbox(
    inputId = "use_controlnew",
    label = "Use the control.ss_new file?",
    status = "info", shape = "round", outline = TRUE)))
})
output$AdvancedSS_forecastnew<- renderUI({
  #Checkbox: rerun from SS's regenerated forecast file.
  fluidRow(column(width = 6, prettyCheckbox(
    inputId = "use_forecastnew",
    label = "Use the forecast.ss_new file?",
    status = "info", shape = "round", outline = TRUE)))
})
output$AdvancedSS_forecastnew_user<- renderUI({
  fluidRow(column(width = 6, prettyCheckbox(
    inputId = "use_forecastnew",
    label = "Use the forecast.ss_new file?",
    status = "info", shape = "round", outline = TRUE)))
})
output$AdvancedSS_GT1<- renderUI({
  #Checkbox: run with a single growth type instead of the default five.
  fluidRow(column(width = 6, prettyCheckbox(
    inputId = "GT1", label = "Use only one growth type (default is 5)",
    status = "info", shape = "round", outline = TRUE)))
})
output$AdvancedSS_GT5_SSS<- renderUI({
  #SSS counterpart: five growth types instead of one.
  fluidRow(column(width = 6, prettyCheckbox(
    inputId = "GT5", label = "Use 5 growth types (default is 1)",
    status = "info", shape = "round", outline = TRUE)))
})
output$AdvancedSS_Sex3<- renderUI({
  fluidRow(column(width = 6, prettyCheckbox(
    inputId = "Sex3", label = "Retain sex ratio in length compositions (Sex option = 3)",
    status = "info", shape = "round", outline = TRUE)))
})
output$AdvancedSS_Indexvar<- renderUI({
  fluidRow(column(width = 6, prettyCheckbox(
    inputId = "Indexvar", label = "Estimate additional variance on each abundance index?",
    status = "info", shape = "round", outline = TRUE)))
})
output$AdvancedSS_ageerror<- renderUI({
  #Checkbox toggling the custom ageing-error file upload below.
  fluidRow(column(width = 12, prettyCheckbox(
    inputId = "Ageing_error_choice", label = "Add custom ageing error matrices?",
    status = "info", shape = "round", outline = TRUE)))
})
output$AdvancedSS_ageerror_in <- renderUI({
  #Ageing-error CSV chooser; rendered once the checkbox exists and is ticked.
  if (!is.null(input$Ageing_error_choice) && input$Ageing_error_choice) {
    fluidRow(column(width = 12, fileInput(
      inputId = 'file33', label = 'Ageing error file',
      accept = c('text/csv', 'text/comma-separated-values',
                 'text/tab-separated-values', 'text/plain', '.csv'))))
  }
})
output$AdvancedSS_Ctunits<- renderUI({
  #Checkbox toggling per-fleet catch-unit entry.
  fluidRow(column(width = 12, prettyCheckbox(
    inputId = "Ct_units_choice",
    label = "Specify catch units (1=biomass (default); 2=numbers) for each fleet?",
    status = "info", shape = "round", outline = TRUE)))
})
output$AdvancedSS_Ctunitsfleets <- renderUI({
  if (!is.null(input$Ct_units_choice) && input$Ct_units_choice) {
    fluidRow(column(width = 12, textInput(
      inputId = "fleet_ct_units",
      label = "Enter catch units for each fleet", value = "")))
  }
})
output$AdvancedSS_Ctunits_SSS<- renderUI({
  #SSS counterpart of the catch-units checkbox.
  fluidRow(column(width = 12, prettyCheckbox(
    inputId = "Ct_units_choice_SSS",
    label = "Specify catch units for each fleet?",
    status = "info", shape = "round", outline = TRUE)))
})
output$AdvancedSS_Ctunitsfleets_SSS<- renderUI({
  if (!is.null(input$Ct_units_choice_SSS) && input$Ct_units_choice_SSS) {
    fluidRow(column(width = 12, textInput(
      inputId = "fleet_ct_units_SSS",
      label = "Enter catch units for each fleet (1=biomass; 2=numbers)", value = "")))
  }
})
output$AdvancedSS_retro_choice<- renderUI({
  #Checkbox toggling retrospective-analysis runs.
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "Retro_choice", label = "Do retrospective runs? Input minus from current year",
    shape = "round", outline = TRUE, status = "info")))
})
output$AdvancedSS_retro_years <- renderUI({
  #Retrospective year offsets (negative = years peeled from the end).
  if(!is.null(input$Retro_choice)){
    if(input$Retro_choice){
      #Bug fix: original had min=-1 > max=-500 (inverted bounds) and a
      #negative step, which is invalid for an HTML number input. Defaults
      #(-1 and -10) are unchanged and lie inside the corrected range.
      fluidRow(column(width=6, numericInput("first_retro_year", "1st retro year",
                                            value=-1, min=-500, max=-1, step=1)),
               column(width=6, numericInput("final_retro_year", "Last retro year",
                                            value=-10, min=-500, max=-1, step=1)))
    }
  }
})
output$AdvancedSS_retro_choice_user <- renderUI({
  #NOTE(review): duplicates inputId "Retro_choice" used above; confirm the
  #two panels are never rendered together.
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "Retro_choice", label = "Do retrospective runs? Input minus from current year",
    shape = "round", outline = TRUE, status = "info")))
})
output$AdvancedSS_retro_years_user <- renderUI({
  if(!is.null(input$Retro_choice)){
    if(input$Retro_choice){
      #Same bounds fix as AdvancedSS_retro_years.
      fluidRow(column(width=6, numericInput("first_retro_year", "1st retro year",
                                            value=-1, min=-500, max=-1, step=1)),
               column(width=6, numericInput("final_retro_year", "Last retro year",
                                            value=-10, min=-500, max=-1, step=1)))
    }
  }
})
output$AdvancedSS_Ltbin <- renderUI({
  #Length-composition bin controls. Default bin width is inferred from the
  #spacing of the length-bin column labels in the uploaded length data.
  if (is.null(rv.Lt$data)) {
    bin.step <- 2
  } else {
    bin.step <- as.numeric(colnames(rv.Lt$data)[7]) - as.numeric(colnames(rv.Lt$data)[6])
  }
  fluidRow(
    column(width = 4, numericInput(inputId = "lt_bin_size", label = "bin size",
      value = bin.step, min = 0, max = 10000, step = 1)),
    column(width = 4, numericInput(inputId = "lt_min_bin", label = "minimum bin",
      value = 4, min = 0, max = 10000, step = 0.01)),
    #Default max bin: an even value just above Linf * 1.2326 -- presumably
    #covers lengths beyond Linf; TODO confirm the 0.2326 constant's origin.
    column(width = 4, numericInput(inputId = "lt_max_bin", label = "maximum bin",
      value = 2 * (round((Linf() + (Linf() * 0.2326)) / 2)) + 2,
      min = 0, max = 10000, step = 0.01))
  )
})
output$Profile_multi_values <- renderUI({
  #Upload control for the CSV defining likelihood-profile input values.
  fluidRow(column(width = 12, fileInput(
    inputId = 'file_multi_profile', label = 'Profile input values',
    accept = c('text/csv', 'text/comma-separated-values',
               'text/tab-separated-values', 'text/plain', '.csv'))))
})
# roots <- getVolumes()()
###############################################
###############################################
###############################################
################# PARAMETERS ##################
###############################################
#Build the comma-separated per-fleet "1" string used downstream; the fleet
#count is the largest fleet number appearing in the length, age, or index
#data (column 3 of each).
FleetNs<-reactive({
  #Bug fix: the original called is.null() with four arguments (is.null()
  #takes exactly one, so this errored at runtime) and indexed data frames
  #that may still be NULL. Check each reactive value individually and only
  #index the ones that exist.
  if(all(c(is.null(rv.Ct$data),is.null(rv.Lt$data),is.null(rv.Age$data),is.null(rv.Index$data)))) return(NULL)
  fleet.ids<-c(
    if(!is.null(rv.Lt$data)) rv.Lt$data[,3],
    if(!is.null(rv.Age$data)) rv.Age$data[,3],
    if(!is.null(rv.Index$data)) rv.Index$data[,3]
  )
  #No fleet-numbered data yet (e.g. only catches loaded): nothing to build.
  if(length(fleet.ids)==0) return(NULL)
  fleetnum<-rep(1,max(fleet.ids))
  FleetNs<-paste(as.character(fleetnum), collapse=",")
  FleetNs
})
#Maximum model age: from 5.4/M when a female M input exists (rule-of-thumb
#longevity from natural mortality -- TODO confirm the 5.4 constant), and/or
#the oldest age bin in the age-composition data, whichever is larger.
Nages<-reactive({
  Nages<-NA
  if(all(c(is.null(input$M_f),is.null(input$M_f_fix),is.null(input$M_f_mean),is.null(input$M_f_mean_sss),is.null(rv.Age$data)))) return(NULL)
  #Bug fix: isTRUE() guards against NULL inputs (controls not yet rendered);
  #a bare !is.na(NULL) is zero-length and crashes the if() condition.
  if(isTRUE(!is.na(input$M_f))) {Nages<-ceiling(5.4/input$M_f)}
  if(isTRUE(!is.na(input$M_f_fix))) {Nages<-ceiling(5.4/input$M_f_fix)}
  if(isTRUE(!is.na(input$M_f_mean))) {Nages<-ceiling(5.4/input$M_f_mean)}
  if(isTRUE(!is.na(input$M_f_mean_sss))) {Nages<-ceiling(5.4/input$M_f_mean_sss)}
  if(!is.null(rv.Age$data))
  {
    #Age bins are the column names from column 9 onward.
    Nages_in<-max(as.numeric(colnames(rv.Age$data[,9:ncol(rv.Age$data)])))
    if(!is.na(Nages)&&Nages_in>Nages){Nages<-Nages_in}
    if(is.na(Nages)){Nages<-Nages_in}
  }
  Nages
})
#Female natural mortality, resolved across the four parameterization modes
#(estimated, fixed, prior mean, SSS prior mean); later non-NA inputs win.
#NULL until at least one of the inputs exists.
M_f_in<-reactive({
  M_f_in<-NA
  if(all(c(is.null(input$M_f),is.null(input$M_f_fix),is.null(input$M_f_mean),is.null(input$M_f_mean_sss)))) return(NULL)
  #Bug fix: isTRUE() guards against NULL inputs (controls not yet rendered);
  #a bare !is.na(NULL) is zero-length and crashes the if() condition.
  if(isTRUE(!is.na(input$M_f))) {M_f_in<-input$M_f}
  if(isTRUE(!is.na(input$M_f_fix))) {M_f_in<-input$M_f_fix}
  if(isTRUE(!is.na(input$M_f_mean))) {M_f_in<-input$M_f_mean}
  if(isTRUE(!is.na(input$M_f_mean_sss))) {M_f_in<-input$M_f_mean_sss}
  M_f_in
})
M_m_in<-reactive({
  #Male natural mortality from whichever male-parameter mode is active;
  #NA when none is set, NULL before any of the inputs exist. any() on the
  #combined condition is NULL-safe (zero-length -> FALSE).
  if (all(c(is.null(input$M_m), is.null(input$M_m_fix),
            is.null(input$M_m_mean), is.null(input$M_m_mean_sss)))) {
    return(NULL)
  }
  val <- NA
  if (any(input$male_parms & !is.na(input$M_m))) val <- input$M_m
  if (any(input$male_parms_fix & !is.na(input$M_m_fix))) val <- input$M_m_fix
  if (any(input$male_parms_est & !is.na(input$M_m_mean))) val <- input$M_m_mean
  if (any(input$male_parms_SSS & !is.na(input$M_m_mean_sss))) val <- input$M_m_mean_sss
  val
})
#Female asymptotic length (Linf), resolved across the four input modes;
#later non-NA inputs win. NULL until at least one input control exists.
Linf<-reactive({
  Linf<-NA
  if(all(c(is.null(input$Linf_f),is.null(input$Linf_f_fix),is.null(input$Linf_f_mean),is.null(input$Linf_f_mean_sss)))) return(NULL)
  #Bug fix: isTRUE() guards against NULL inputs (controls not yet rendered);
  #a bare !is.na(NULL) is zero-length and crashes the if() condition.
  if(isTRUE(!is.na(input$Linf_f))) {Linf<-input$Linf_f}
  if(isTRUE(!is.na(input$Linf_f_fix))) {Linf<-input$Linf_f_fix}
  if(isTRUE(!is.na(input$Linf_f_mean))) {Linf<-input$Linf_f_mean}
  if(isTRUE(!is.na(input$Linf_f_mean_sss))) {Linf<-input$Linf_f_mean_sss}
  Linf
})
Linf_m_in<-reactive({
  #Male asymptotic length (Linf) from the active male-parameter mode.
  if (all(c(is.null(input$Linf_m), is.null(input$Linf_m_fix),
            is.null(input$Linf_m_mean), is.null(input$Linf_m_mean_sss)))) {
    return(NULL)
  }
  val <- NA
  if (any(input$male_parms & !is.na(input$Linf_m))) val <- input$Linf_m
  if (any(input$male_parms_fix & !is.na(input$Linf_m_fix))) val <- input$Linf_m_fix
  if (any(input$male_parms_est & !is.na(input$Linf_m_mean))) val <- input$Linf_m_mean
  if (any(input$male_parms_SSS & !is.na(input$Linf_m_mean_sss))) val <- input$Linf_m_mean_sss
  val
})
#Female von Bertalanffy growth coefficient k, resolved across the four
#input modes; later non-NA inputs win. NULL until an input exists.
k_vbgf<-reactive({
  k_vbgf<-NA
  if(all(c(is.null(input$k_f),is.null(input$k_f_fix),is.null(input$k_f_mean),is.null(input$k_f_mean_sss)))) return(NULL)
  #Bug fix: isTRUE() guards against NULL inputs (controls not yet rendered);
  #a bare !is.na(NULL) is zero-length and crashes the if() condition.
  if(isTRUE(!is.na(input$k_f))) {k_vbgf<-input$k_f}
  if(isTRUE(!is.na(input$k_f_fix))) {k_vbgf<-input$k_f_fix}
  if(isTRUE(!is.na(input$k_f_mean))) {k_vbgf<-input$k_f_mean}
  if(isTRUE(!is.na(input$k_f_mean_sss))) {k_vbgf<-input$k_f_mean_sss}
  k_vbgf
})
#Process life history input for plots
# Male VBGF growth coefficient (k). Last active, non-NA (checkbox, input)
# pair wins, so SSS mode takes precedence over estimated, fixed, then basic.
# NULL until any input renders; NA when none is usable. any() maps NULL
# inputs (logical(0)) to FALSE instead of erroring.
k_vbgf_m_in<-reactive({
  out<-NA
  no.inputs<-all(c(is.null(input$k_m),is.null(input$k_m_fix),is.null(input$k_m_mean),is.null(input$k_m_mean_sss)))
  if(no.inputs) return(NULL)
  if(any(input$male_parms&!is.na(input$k_m))) out<-input$k_m
  if(any(input$male_parms_fix&!is.na(input$k_m_fix))) out<-input$k_m_fix
  if(any(input$male_parms_est&!is.na(input$k_m_mean))) out<-input$k_m_mean
  if(any(input$male_parms_SSS&!is.na(input$k_m_mean_sss))) out<-input$k_m_mean_sss
  out
})
# Female VBGF age at length zero (t0), resolved from whichever input mode
# currently holds a value; later checks override earlier ones (SSS mode
# takes precedence). NULL when no input exists yet, NA when none is usable.
t0_vbgf<-reactive({
  t0_vbgf<-NA
  if(all(c(is.null(input$t0_f),is.null(input$t0_f_fix),is.null(input$t0_f_mean),is.null(input$t0_f_mean_sss)))) return(NULL)
  # is.null guards prevent if(logical(0)) errors when only some of the
  # four inputs are rendered (is.na(NULL) is logical(0)).
  if(!is.null(input$t0_f)&&!is.na(input$t0_f)) {t0_vbgf<-input$t0_f}
  if(!is.null(input$t0_f_fix)&&!is.na(input$t0_f_fix)) {t0_vbgf<-input$t0_f_fix}
  if(!is.null(input$t0_f_mean)&&!is.na(input$t0_f_mean)) {t0_vbgf<-input$t0_f_mean}
  if(!is.null(input$t0_f_mean_sss)&&!is.na(input$t0_f_mean_sss)) {t0_vbgf<-input$t0_f_mean_sss}
  t0_vbgf
})
# Male VBGF age at length zero (t0). Last active, non-NA (checkbox, input)
# pair wins, giving SSS mode precedence over estimated, fixed, then basic.
# NULL until any input renders; NA when none is usable. any() maps NULL
# inputs (logical(0)) to FALSE instead of erroring.
t0_vbgf_m_in<-reactive({
  out<-NA
  no.inputs<-all(c(is.null(input$t0_m),is.null(input$t0_m_fix),is.null(input$t0_m_mean),is.null(input$t0_m_mean_sss)))
  if(no.inputs) return(NULL)
  if(any(input$male_parms&!is.na(input$t0_m))) out<-input$t0_m
  if(any(input$male_parms_fix&!is.na(input$t0_m_fix))) out<-input$t0_m_fix
  if(any(input$male_parms_est&!is.na(input$t0_m_mean))) out<-input$t0_m_mean
  if(any(input$male_parms_SSS&!is.na(input$t0_m_mean_sss))) out<-input$t0_m_mean_sss
  out
})
# Female length at 50% maturity, resolved from whichever input mode holds
# a value; later checks override earlier ones (SSS mode takes precedence).
# NULL when no input exists yet, NA when none is usable.
L50<-reactive({
  L50<-NA
  if(all(c(is.null(input$L50_f),is.null(input$L50_f_fix),is.null(input$L50_f_est),is.null(input$L50_f_sss)))) return(NULL)
  # is.null guards prevent if(logical(0)) errors when only some of the
  # four inputs are rendered (is.na(NULL) is logical(0)).
  if(!is.null(input$L50_f)&&!is.na(input$L50_f)) {L50<-input$L50_f}
  if(!is.null(input$L50_f_fix)&&!is.na(input$L50_f_fix)) {L50<-input$L50_f_fix}
  if(!is.null(input$L50_f_est)&&!is.na(input$L50_f_est)) {L50<-input$L50_f_est}
  if(!is.null(input$L50_f_sss)&&!is.na(input$L50_f_sss)) {L50<-input$L50_f_sss}
  L50
})
# Female length at 95% maturity, resolved from whichever input mode holds
# a value; later checks override earlier ones (SSS mode takes precedence).
# NULL when no input exists yet, NA when none is usable.
L95<-reactive({
  L95<-NA
  if(all(c(is.null(input$L95_f),is.null(input$L95_f_fix),is.null(input$L95_f_est),is.null(input$L95_f_sss)))) return(NULL)
  # is.null guards prevent if(logical(0)) errors when only some of the
  # four inputs are rendered (is.na(NULL) is logical(0)).
  if(!is.null(input$L95_f)&&!is.na(input$L95_f)) {L95<-input$L95_f}
  if(!is.null(input$L95_f_fix)&&!is.na(input$L95_f_fix)) {L95<-input$L95_f_fix}
  if(!is.null(input$L95_f_est)&&!is.na(input$L95_f_est)) {L95<-input$L95_f_est}
  if(!is.null(input$L95_f_sss)&&!is.na(input$L95_f_sss)) {L95<-input$L95_f_sss}
  L95
})
#############
### PLOTS ###
#############
##################
### CATCH PLOT ###
##################
# Reveal the "Removal history" section label once catch data are loaded.
# NOTE(review): shinyjs::show() is passed the value of the renderText
# assignment (a render function), not an output id string; the label is
# wired up by the assignment itself — confirm the show() wrapper has effect.
observeEvent(req(!is.null(rv.Ct$data)), {
shinyjs::show(output$catch_plots_label<-renderText({"Removal history"}))
})
# Removal-history plot: once catch data exist, render a UI slot holding a
# point-and-line plot of removals by fleet over time.
# rv.Ct$data: first column is used as the x axis (presumably year) and the
# remaining columns are one fleet each — TODO confirm against the uploader.
observeEvent(req(!is.null(rv.Ct$data)), {
output$Ctplot_it<-renderUI({
if(!is.null(rv.Ct$data))
{
output$Ctplot <- renderPlot({
if (is.null(rv.Ct$data)) return(NULL)
# Reshape wide (year + one column per fleet) to long, then plot.
# aes_string() + names(.)[1] binds x to whatever the first column is named;
# `.` here refers to the pivoted data via the magrittr pipe.
rv.Ct$data %>%
pivot_longer(-1, names_to = "Fleet", values_to = "catch") %>%
ggplot(aes_string(names(.)[1], "catch", color = "Fleet")) +
geom_point() +
geom_line(lwd=1.5) +
ylab("Removals") +
xlab("Year") +
scale_color_viridis_d()
})
plotOutput("Ctplot")
}
})
})
##########################
### LENGTH COMPS PLOTS ###
##########################
# Reveal the "Length compositions" section label once length data are loaded.
# NOTE(review): as with the other labels, shinyjs::show() receives the
# renderText assignment's value rather than an output id — confirm intent.
observeEvent(req(!is.null(rv.Lt$data)), {
shinyjs::show(output$lt_comp_plots_label<-renderText({"Length compositions"}))
})
# Length-composition plot: one line per year, faceted by sex x fleet, with
# a vertical reference line at the female length at 50% maturity.
observeEvent(req(!is.null(rv.Lt$data)), {
output$Ltplot_it<-renderUI({
if(!is.null(rv.Lt$data))
{
output$Ltplot<-renderPlot({
if (is.null(rv.Lt$data)) return(NULL)
rv.Lt$data %>%
rename_all(tolower) %>%
dplyr::select(-nsamps) %>%
pivot_longer(c(-year, -fleet, -sex)) %>%
# Length-bin column names carry non-numeric prefixes; strip to numbers.
mutate(Year = factor(year),
name = as.numeric(gsub("[^0-9.-]", "", name))) %>%
ggplot(aes(name, value, color=Year)) +
geom_line() +
#geom_col(position="dodge") +
facet_grid(sex~fleet, scales="free_y",labeller = label_both) +
# facet_wrap(sex~year, scales="free_y",ncol=5) +
xlab("Length bin") +
ylab("Frequency") +
# NOTE(review): the color aesthetic is mapped but a fill scale is applied;
# scale_color_viridis_d() may have been intended.
scale_fill_viridis_d()+
# Fix: L50 is a reactive and must be called. The original passed the
# function object itself to xintercept, which fails at plot time.
# L50() may be NULL, in which case no line is drawn.
geom_vline(xintercept = L50())
})
plotOutput("Ltplot")
}
})
})
# observeEvent(req(!is.null(input$file1)), {
# output$Ltplot<-renderPlot({
# inFile<- input$file1
# # if (is.null(inFile)) {
# # return(NULL)
# # shinyjs::hide("Ltplot")}
# # else{
# Lt.comp.data<-read.csv(inFile$datapath,check.names=FALSE)
# lt.dat.plot<-(Lt.comp.data)[,c(-4)]
# dat.gg<-melt(lt.dat.plot,id=colnames(lt.dat.plot)[1:3])
# colnames(dat.gg)<-c("year","fleet","sex","bin","ltnum")
# ggplot(dat.gg,aes(bin,ltnum,fill=factor(fleet)))+
# geom_col(color="white",position="dodge")+
# #geom_col(fill="#236192",color="white")+
# facet_wrap(~year,scales="free_y")+
# xlab("Length bin")+
# ylab("Frequency")+
# labs(fill="Fleet")+
# scale_fill_viridis(discrete=TRUE, option="viridis")
# #scale_x_discrete(breaks=c(1,5,10,20),labels=as.character(levels(dat.gg$bin))[c(1,5,10,20)])
# #scale_fill_brewer(palette = "BuPu")
# # }
# })
# })
#################
### AGE PLOTS ###
#################
# Reveal the "Marginal age compositions" label once age data are loaded.
# NOTE(review): shinyjs::show() receives the renderText assignment's value
# rather than an output id — confirm intent.
observeEvent(req(!is.null(rv.Age$data)), {
shinyjs::show(output$marginal_age_comp_plots_label<-renderText({"Marginal age compositions"}))
})
# Reveal the "Conditional age at length" label once age data are loaded.
# NOTE(review): shinyjs::show() receives the renderText assignment's value
# rather than an output id — confirm intent.
observeEvent(req(!is.null(rv.Age$data)), {
shinyjs::show(output$conditional_age_comp_plots_label<-renderText({"Conditional age at length"}))
})
# Age-composition plots. The data are split by the Lbin_hi column:
# rows with Lbin_hi < 0 are marginal age comps, rows with Lbin_hi >= 0
# are conditional age-at-length observations (SS data-file convention —
# TODO confirm against the age-data uploader).
observeEvent(req(!is.null(rv.Age$data)), {
marginal_ages<-subset(rv.Age$data,Lbin_hi<0)
Cond_ages<-subset(rv.Age$data,Lbin_hi>=0)
# Marginal comps: one line per year, faceted by sex x fleet.
output$Ageplot_it_marginal<-renderUI({
if(!is.null(rv.Age$data))
{
output$Ageplot_marginal<-renderPlot({
#inFile_age <- rv.Age$data
# if (is.null(rv.Age$data)) return(NULL)
if (nrow(marginal_ages)==0) return(NULL)
# rv.Age$data %>%
marginal_ages %>%
rename_all(tolower) %>%
dplyr::select(-nsamps,-lbin_hi) %>%
pivot_longer(c(-year, -fleet, -sex, -lbin_low)) %>%
# Age-bin column names carry non-numeric prefixes; strip to numbers.
mutate(Year = factor(year),
name = as.numeric(gsub("[^0-9.-]", "", name))) %>%
ggplot(aes(name, value, color=Year)) +
geom_line() +
# geom_col(position="dodge") +
#facet_wrap(sex~year, scales="free_y",ncol=5) +
facet_grid(sex~fleet, scales="free_y",labeller = label_both) +
#scale_y_continuous(limits=c(0,max(colSums(rv.Age$data[-1,7:ncol(rv.Age$data)]))))+
#scale_y_continuous(limits=c(0,20))+
xlab("Age bin") +
ylab("Frequency") +
scale_fill_viridis_d()
})
plotOutput("Ageplot_marginal")
}
})
# Conditional age-at-length: scatter of age bin vs length bin for cells
# with positive observations, faceted by sex x fleet.
output$Ageplot_it_cond<-renderUI({
if(!is.null(rv.Age$data))
{
output$Ageplot_conditional<-renderPlot({
# if (is.null(rv.Age$data)) return(NULL)
if (nrow(Cond_ages)==0) return(NULL)
# Columns selected by position (1,3,4,7 plus the age bins from 9 on) —
# presumed to be Year/Fleet/Sex/Lbin_hi followed by age-bin counts;
# TODO confirm positions against the expected age-data layout.
Cond_ages_plots<-melt(Cond_ages[,c(1,3,4,7,9:ncol(Cond_ages))],id.vars=c("Year","Fleet","Sex","Lbin_hi"))
Cond_ages_plots_pos<-subset(Cond_ages_plots,value>0)
ggplot(Cond_ages_plots_pos,aes(x=as.numeric(variable),y=as.numeric(Lbin_hi),color=Year))+
geom_point()+
facet_grid(vars(Sex),vars(Fleet),labeller = label_both)+
xlab("Age bin")+
ylab("Length bin")
})
plotOutput("Ageplot_conditional")
}
})
})
# output$Ageplot <- renderPlot({
# inFile_age <- rv.Age$data
# if (is.null(inFile_age)) return(NULL)
# rv.Age$data %>%
# pivot_longer(-1, names_to = "year", values_to = "ltnum") %>%
# rename(bin = Bins) %>%
# ggplot(aes(bin, ltnum)) +
# geom_col(fill="#1D252D", color="white") +
# facet_wrap(~year) +
# xlab("Age bin") +
# ylab("Frequency")
# })
##################
### INDEX PLOT ###
##################
# Reveal the "Indices of Abundance" label once index data are loaded.
# NOTE(review): shinyjs::show() receives the renderText assignment's value
# rather than an output id — confirm intent.
observeEvent(req(!is.null(rv.Index$data)), {
shinyjs::show(output$index_plots_label<-renderText({"Indices of Abundance"}))
})
# Abundance-index plot: z-score each fleet's index so all fleets share one
# axis, then plot lines/points with lognormal error bars from the CVs.
observeEvent(req(!is.null(rv.Index$data)), {
output$Indexplot_it<-renderUI({
if(!is.null(rv.Index$data))
{
output$Indexplot <- renderPlot({
if (is.null(rv.Index$data)) return(NULL)
plot.Index<-rv.Index$data
# Column 3 (presumably the Fleet column) is made discrete for ggplot.
plot.Index[,3]<-as.factor(plot.Index[,3])
# Z-score each fleet's index separately.
plot.Index.zscore<-list()
fleets<-unique(plot.Index$Fleet)
for(i in seq_along(fleets))
{
plot.Index.temp<-plot.Index[plot.Index$Fleet %in% fleets[i],]
plot.Index.temp$Index<-(plot.Index.temp$Index-mean(plot.Index.temp$Index))/sd(plot.Index.temp$Index)
plot.Index.zscore[[i]]<-plot.Index.temp
}
plot.Index.zs<-do.call("rbind", plot.Index.zscore)
# 95% lognormal interval uses quantiles 0.025/0.975. (The original lower
# bound 0.0275 was an asymmetric typo.)
# NOTE(review): Index is already z-scored here and can be <= 0, so
# log(Index) inside qlnorm() can produce NaN — confirm intended input.
ggplot(plot.Index.zs,aes(x=Year,y=Index,group=Fleet, colour=Fleet)) +
geom_line(lwd=1.1) +
geom_errorbar(aes(ymin=qlnorm(0.025,log(Index),CV),ymax=qlnorm(0.975,log(Index),CV),group=Fleet),width=0,size=1)+
geom_point(aes(colour=Fleet),size=4) +
ylab("Z-score") +
xlab("Year") +
scale_color_viridis_d()
})
plotOutput("Indexplot")
}
})
})
#####################
### Plot M by age ###
#####################
# Cohort-decline plot: numbers-at-age proportional to exp(-M * age) for each
# sex out to the maximum age, annotated with the max age. Visualizes the
# effect of the chosen natural-mortality values.
output$Mplot<-renderPlot({
# Tiny constant added to M — presumably to avoid degenerate values when
# M is entered as exactly 0; TODO confirm intent.
mf.in = M_f_in()+0.000000000000001
# Male M defaults to the female value; overridden below when any
# male-parameter mode is selected.
mm.in = M_f_in()+0.000000000000001
# if(input$male_parms|input$male_parms_fix)
if(input$male_parms|input$male_parms_SSS|input$male_parms_fix|input$male_parms_est)
{
mm.in = M_m_in()+0.000000000000001
}
# Bail out until both M values are usable. Note: if a reactive returned
# NULL, NULL + eps is numeric(0), which c() drops, so both any() calls
# see shortened vectors here.
if(any(is.na(c(mf.in, mm.in)))|any(is.null(c(mf.in, mm.in)))) return(NULL)
Female_M = data.frame(Ages = 0:Nages(), PopN = exp(-mf.in * 0:Nages()), Sex="Female")
Male_M = data.frame(Ages = 0:Nages(), PopN=exp(-mm.in * 0:Nages()), Sex="Male")
M_sexes <- rbind(Female_M, Male_M)
# Text annotation pinned near the upper-left of the plot area.
Nage_4_plot <- grobTree(textGrob(paste0("Max age =", Nages()), x=0.1, y=0.95, hjust=0,
gp=gpar(col="darkblue", fontsize=12, fontface="italic")))
ggplot(M_sexes,aes(Ages, PopN, color=Sex))+
geom_line(aes(linetype=Sex), lwd=2)+
ylab("Cohort decline by M")+
annotation_custom(Nage_4_plot)
})
##############################
### Plot VBGF and maturity ###
##############################
# von Bertalanffy growth plot for females and males, with the female
# maturity lengths (Lmat50%, Lmat95%) marked at their corresponding ages.
# Uses helpers defined elsewhere: VBGF() (length at age) and VBGF.age()
# (age at length), plus the parameter reactives above.
output$VBGFplot<-renderPlot({
f_Linf = m_Linf = Linf()
f_k = m_k = k_vbgf()
f_t0 = m_t0 = t0_vbgf()
f_L50 = L50()
f_L95 = L95()
maxage = Nages()
# Male curve reuses female parameters unless a male-parameter mode is on.
if(any(input$male_parms,input$male_parms_SSS,input$male_parms_fix,input$male_parms_est))
{
m_Linf = Linf_m_in()
m_k = k_vbgf_m_in()
m_t0 = t0_vbgf_m_in()
}
# Reactives return NULL before their inputs render; bail out early rather
# than letting f_t0:Nages() or data.frame() fail. (c() drops NULLs, so the
# is.na() check alone does not catch them.)
if(any(vapply(list(f_Linf, f_k, f_t0, m_Linf, m_k, m_t0, maxage), is.null, logical(1)))) return(NULL)
if(!any(is.na(c(f_Linf, f_k, f_t0)))){
vbgf_female = data.frame(Age = c(f_t0:Nages()),
Length = VBGF(f_Linf, f_k, f_t0, c(f_t0:Nages())), Sex="Female")
vbgf_male = data.frame(Age = f_t0:Nages(),
Length=VBGF(m_Linf, m_k, m_t0, c(f_t0:Nages())), Sex="Male")
rbind(vbgf_female,vbgf_male) %>%
ggplot(aes(Age, Length, color=Sex)) +
geom_line(aes(linetype=Sex), lwd=2) -> vbgf.plot
# Overlay maturity points only when both maturity lengths are available.
if(!is.null(f_L50)&&!is.null(f_L95)&&!any(is.na(c(f_L50, f_L95)))){
age.mat = data.frame(Age = VBGF.age(f_Linf, f_k, f_t0, c(f_L50, f_L95)),
Length = c(f_L50, f_L95), Sex="Female")
vbgf.plot +
geom_point(data = age.mat, aes(Age, Length), color = "darkorange", size=6) +
geom_text(data = age.mat,label=c("Lmat50%", "Lmat95%"),
nudge_x = -0.1 * Nages(), color="black") -> vbgf.plot
}
vbgf.plot
}
})
###################
### Selectivity ###
###################
# observeEvent(req(input$Sel50,input$Selpeak), {
# shinyjs::show(output$Sel_plots_label<-renderText({"Selectivity"}))
# })
#h4("Selectivity")
# Show the stock-status-prior title only on the early (setup) tabs when the
# loaded data are catch-only (no lengths, ages, or indices) and the
# user-supplied-model option is off or absent.
output$Dep_plot_title<-renderUI({
  tab.num<-((as.numeric(input$tabs)*1)/1)
  catch.only<-is.null(rv.Lt$data)&!is.null(rv.Ct$data)&is.null(rv.Age$data)&is.null(rv.Index$data)
  no.user.model<-any(is.null(input$user_model),!input$user_model)
  if(tab.num<4&catch.only&no.user.model){
    h4("Relative Stock Status Prior")
  }
})
# Relative stock-status prior preview. Rendered only in catch-only (SSS)
# configurations on the setup tabs; draws 100k samples from the selected
# prior and shows their histogram.
output$Dep_plot_it<-renderUI({
if(((as.numeric(input$tabs)*1)/1)<4&is.null(rv.Lt$data)&!is.null(rv.Ct$data)&is.null(rv.Age$data)&is.null(rv.Index$data)&any(is.null(input$user_model),!input$user_model)){
output$Depletion_plot <- renderPlot({
if(!is.na(input$status_year)&!is.na(input$Depl_mean_sss))
{
# "no prior": nothing to draw. (The original fell through with
# dep.hist.sss undefined and then errored inside gghistogram().)
if(input$Depl_prior_sss=="no prior") return(NULL)
if(input$Depl_prior_sss=="beta"){dep.hist.sss<-data.frame(draws=rbeta.ab(100000,input$Depl_mean_sss,input$Depl_SD_sss,0,1))}
if(input$Depl_prior_sss=="lognormal"){dep.hist.sss<-data.frame(draws=rlnorm(100000,log(input$Depl_mean_sss),input$Depl_SD_sss))}
if(input$Depl_prior_sss=="truncated normal"){dep.hist.sss<-data.frame(draws=rtruncnorm(100000,0,1,input$Depl_mean_sss,input$Depl_SD_sss))}
# NOTE(review): for "uniform" the mean/SD inputs are used as the min/max
# of runif() — confirm this is the intended parameterization.
if(input$Depl_prior_sss=="uniform"){dep.hist.sss<-data.frame(draws=runif(100000,input$Depl_mean_sss,input$Depl_SD_sss))}
# Guard against any unrecognized prior choice.
if(!exists("dep.hist.sss",inherits=FALSE)) return(NULL)
gghistogram(dep.hist.sss, x = "draws", fill = "purple")
}
})
plotOutput("Depletion_plot")
}
})
# Selectivity preview (SS double-normal pattern 24) for each fleet, for the
# estimation (non-SSS) model. Sel50/Selpeak (and, for dome-shaped shapes,
# PeakDesc/LtPeakFinal/FinalSel) are comma-separated per-fleet lists;
# nothing is drawn until every required list is present and the lists have
# matching lengths. Logistic selectivity is emulated by fixing the
# descending-limb parameters of the double normal.
output$Selplot <- renderPlot({
if(input$Sel_choice=="Logistic"&any(any(input$Sel50[1]=="",is.null(input$Sel50)),any(input$Selpeak[1]=="",is.null(input$Selpeak)))) return(NULL)
if(input$Sel_choice=="Logistic")
{
# Validate: Sel50 and Selpeak present, non-empty, and equal length.
if(all(length(as.numeric(trimws(unlist(strsplit(input$Sel50,",")))))==length(as.numeric(trimws(unlist(strsplit(input$Selpeak,","))))),
all(input$Sel50!=""),
all(!is.null(input$Sel50)),
all(input$Selpeak!=""),
all(!is.null(input$Selpeak))))
{
Sel50<-as.numeric(trimws(unlist(strsplit(input$Sel50,","))))
Selpeak<-as.numeric(trimws(unlist(strsplit(input$Selpeak,","))))
# Fixed descending-limb values that make the double normal asymptotic.
PeakDesc<-rep(10000,length(Selpeak))
LtPeakFinal<-rep(0.0001,length(Selpeak))
FinalSel<-rep(0.999,length(Selpeak))
Sel.out<-doubleNorm24.sel(Sel50=Sel50[1],Selpeak=Selpeak[1],PeakDesc=PeakDesc[1],LtPeakFinal=LtPeakFinal[1],FinalSel=FinalSel[1])
Sel.out<-data.frame(Bin=Sel.out[,1],Sel=Sel.out[,2],Fleet="Fleet 1")
if(length(Sel50)>1)
{
for(ii in 2:length(Sel50))
{
Sel.out.temp<-doubleNorm24.sel(Sel50=Sel50[ii],Selpeak=Selpeak[ii],PeakDesc=PeakDesc[ii],LtPeakFinal=LtPeakFinal[ii],FinalSel=FinalSel[ii])
Sel.out.temp<-data.frame(Bin=Sel.out.temp[,1],Sel=Sel.out.temp[,2],Fleet=paste0("Fleet ",ii))
Sel.out<-rbind(Sel.out,Sel.out.temp)
}
}
selplot.out<-ggplot(Sel.out,aes(Bin,Sel,colour=Fleet)) +
geom_line(lwd=1.5) +
# Fix: axis labels were swapped (x is the length bin, y is selectivity).
ylab("Selectivity") +
xlab("Length Bins") +
scale_color_viridis_d()
}
}
if(input$Sel_choice=="Dome-shaped")
{
# Validate: all five per-fleet lists present and the same length as Sel50.
if(all(length(as.numeric(trimws(unlist(strsplit(input$Sel50,",")))))==length(as.numeric(trimws(unlist(strsplit(input$Selpeak,","))))),
length(as.numeric(trimws(unlist(strsplit(input$Sel50,",")))))==length(as.numeric(trimws(unlist(strsplit(input$PeakDesc,","))))),
length(as.numeric(trimws(unlist(strsplit(input$Sel50,",")))))==length(as.numeric(trimws(unlist(strsplit(input$LtPeakFinal,","))))),
length(as.numeric(trimws(unlist(strsplit(input$Sel50,",")))))==length(as.numeric(trimws(unlist(strsplit(input$FinalSel,","))))),
all(input$Sel50!=""),
all(!is.null(input$Sel50)),
all(input$Selpeak!=""),
all(!is.null(input$Selpeak))))
{
Sel50<-as.numeric(trimws(unlist(strsplit(input$Sel50,","))))
Selpeak<-as.numeric(trimws(unlist(strsplit(input$Selpeak,","))))
PeakDesc<-as.numeric(trimws(unlist(strsplit(input$PeakDesc,","))))
LtPeakFinal<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal,","))))
FinalSel<-as.numeric(trimws(unlist(strsplit(input$FinalSel,","))))
Sel.out<-doubleNorm24.sel(Sel50=Sel50[1],Selpeak=Selpeak[1],PeakDesc=PeakDesc[1],LtPeakFinal=LtPeakFinal[1],FinalSel=FinalSel[1])
Sel.out<-data.frame(Bin=Sel.out[,1],Sel=Sel.out[,2],Fleet="Fleet 1")
if(length(Sel50)>1)
{
for(ii in 2:length(Sel50))
{
Sel.out.temp<-doubleNorm24.sel(Sel50=Sel50[ii],Selpeak=Selpeak[ii],PeakDesc=PeakDesc[ii],LtPeakFinal=LtPeakFinal[ii],FinalSel=FinalSel[ii])
Sel.out.temp<-data.frame(Bin=Sel.out.temp[,1],Sel=Sel.out.temp[,2],Fleet=paste0("Fleet ",ii))
Sel.out<-rbind(Sel.out,Sel.out.temp)
}
}
selplot.out<-ggplot(Sel.out,aes(Bin,Sel,colour=Fleet)) +
geom_line(lwd=1.5) +
# Fix: axis labels were swapped (x is the length bin, y is selectivity).
ylab("Selectivity") +
xlab("Length Bins") +
scale_color_viridis_d()
}
}
# selplot.out exists only when a branch above validated and built the plot;
# get0() returns NULL instead of erroring when it was never created.
if(!is.null(get0("selplot.out"))){return(selplot.out)}
else(return(NULL))
})
# Selectivity preview (SS double-normal pattern 24) for each fleet, for the
# SSS model inputs (the *_sss input set). Same structure as output$Selplot:
# comma-separated per-fleet parameter lists, validated for presence and
# matching lengths before plotting. Logistic selectivity is emulated by
# fixing the descending-limb parameters of the double normal.
output$Selplot_SSS <- renderPlot({
if(input$Sel_choice_sss=="Logistic"&any(any(input$Sel50_sss[1]=="",is.null(input$Sel50_sss)),any(input$Selpeak_sss[1]=="",is.null(input$Selpeak_sss)))) return(NULL)
if(input$Sel_choice_sss=="Logistic")
{
# Validate: Sel50 and Selpeak present, non-empty, and equal length.
if(all(length(as.numeric(trimws(unlist(strsplit(input$Sel50_sss,",")))))==length(as.numeric(trimws(unlist(strsplit(input$Selpeak_sss,","))))),
all(input$Sel50_sss!=""),
all(!is.null(input$Sel50_sss)),
all(input$Selpeak_sss!=""),
all(!is.null(input$Selpeak_sss))))
{
Sel50<-as.numeric(trimws(unlist(strsplit(input$Sel50_sss,","))))
Selpeak<-as.numeric(trimws(unlist(strsplit(input$Selpeak_sss,","))))
# Fixed descending-limb values that make the double normal asymptotic.
PeakDesc<-rep(10000,length(Selpeak))
LtPeakFinal<-rep(0.0001,length(Selpeak))
FinalSel<-rep(0.999,length(Selpeak))
Sel.out<-doubleNorm24.sel(Sel50=Sel50[1],Selpeak=Selpeak[1],PeakDesc=PeakDesc[1],LtPeakFinal=LtPeakFinal[1],FinalSel=FinalSel[1])
Sel.out<-data.frame(Bin=Sel.out[,1],Sel=Sel.out[,2],Fleet="Fleet 1")
if(length(Sel50)>1)
{
for(ii in 2:length(Sel50))
{
Sel.out.temp<-doubleNorm24.sel(Sel50=Sel50[ii],Selpeak=Selpeak[ii],PeakDesc=PeakDesc[ii],LtPeakFinal=LtPeakFinal[ii],FinalSel=FinalSel[ii])
Sel.out.temp<-data.frame(Bin=Sel.out.temp[,1],Sel=Sel.out.temp[,2],Fleet=paste0("Fleet ",ii))
Sel.out<-rbind(Sel.out,Sel.out.temp)
}
}
selplot.out<-ggplot(Sel.out,aes(Bin,Sel,colour=Fleet)) +
geom_line(lwd=1.5) +
# Fix: axis labels were swapped (x is the length bin, y is selectivity).
ylab("Selectivity") +
xlab("Length Bins") +
scale_color_viridis_d()
}
}
if(input$Sel_choice_sss=="Dome-shaped")
{
# Validate: all five per-fleet lists present and the same length as Sel50.
if(all(length(as.numeric(trimws(unlist(strsplit(input$Sel50_sss,",")))))==length(as.numeric(trimws(unlist(strsplit(input$Selpeak_sss,","))))),
length(as.numeric(trimws(unlist(strsplit(input$Sel50_sss,",")))))==length(as.numeric(trimws(unlist(strsplit(input$PeakDesc_sss,","))))),
length(as.numeric(trimws(unlist(strsplit(input$Sel50_sss,",")))))==length(as.numeric(trimws(unlist(strsplit(input$LtPeakFinal_sss,","))))),
length(as.numeric(trimws(unlist(strsplit(input$Sel50_sss,",")))))==length(as.numeric(trimws(unlist(strsplit(input$FinalSel_sss,","))))),
all(input$Sel50_sss!=""),
all(!is.null(input$Sel50_sss)),
all(input$Selpeak_sss!=""),
all(!is.null(input$Selpeak_sss))))
{
Sel50<-as.numeric(trimws(unlist(strsplit(input$Sel50_sss,","))))
Selpeak<-as.numeric(trimws(unlist(strsplit(input$Selpeak_sss,","))))
PeakDesc<-as.numeric(trimws(unlist(strsplit(input$PeakDesc_sss,","))))
LtPeakFinal<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal_sss,","))))
FinalSel<-as.numeric(trimws(unlist(strsplit(input$FinalSel_sss,","))))
Sel.out<-doubleNorm24.sel(Sel50=Sel50[1],Selpeak=Selpeak[1],PeakDesc=PeakDesc[1],LtPeakFinal=LtPeakFinal[1],FinalSel=FinalSel[1])
Sel.out<-data.frame(Bin=Sel.out[,1],Sel=Sel.out[,2],Fleet="Fleet 1")
if(length(Sel50)>1)
{
for(ii in 2:length(Sel50))
{
Sel.out.temp<-doubleNorm24.sel(Sel50=Sel50[ii],Selpeak=Selpeak[ii],PeakDesc=PeakDesc[ii],LtPeakFinal=LtPeakFinal[ii],FinalSel=FinalSel[ii])
Sel.out.temp<-data.frame(Bin=Sel.out.temp[,1],Sel=Sel.out.temp[,2],Fleet=paste0("Fleet ",ii))
Sel.out<-rbind(Sel.out,Sel.out.temp)
}
}
selplot.out<-ggplot(Sel.out,aes(Bin,Sel,colour=Fleet)) +
geom_line(lwd=1.5) +
# Fix: axis labels were swapped (x is the length bin, y is selectivity).
ylab("Selectivity") +
xlab("Length Bins") +
scale_color_viridis_d()
}
}
# selplot.out exists only when a branch above validated and built the plot;
# get0() returns NULL instead of erroring when it was never created.
if(!is.null(get0("selplot.out"))){return(selplot.out)}
else(return(NULL))
})
#############################################
### END PLOTS ###
#############################################
#############################################
######## PREPARE FILES AND RUN SSS ##########
#############################################
SSS.run<-observeEvent(input$run_SSS,{
show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[1],text="Create model files")
print(1)
# progress <- shiny::Progress$new(session, min=1, max=2)
# on.exit(progress$close())
# progress$set(message = 'Model run in progress',
# detail = '')
# for (i in 1:2) {
# progress$set(value = i)
# Sys.sleep(0.5)
# }
#Copy and move files
if(file.exists(paste0("Scenarios/",input$Scenario_name)))
{
unlink(paste0("Scenarios/",input$Scenario_name),recursive=TRUE)
# file.remove(paste0(getwd(),"/Scenarios/",input$Scenario_name))
}
#if(input$)
{
file.copy(paste0("SSS_files/sssexample_BH"),paste0("Scenarios"),recursive=TRUE,overwrite=TRUE)
file.rename(paste0("Scenarios/sssexample_BH"), paste0("Scenarios/",input$Scenario_name))
}
#if()
# {
# file.copy(paste0(getwd(),"/SSS_files/sssexample_RickPow"),paste0(getwd(),"/Scenarios"),recursive=TRUE,overwrite=TRUE)
# file.rename(paste0(getwd(),"/Scenarios/sssexample_RickPow"), paste0(getwd(),"/Scenarios/",input$Scenario_name))
# }
#Read data and control files
data.file<-SS_readdat(paste0("Scenarios/",input$Scenario_name,"/sss_example.dat"))
ctl.file<-SS_readctl(paste0("Scenarios/",input$Scenario_name,"/sss_example.ctl"),use_datlist = TRUE, datlist=data.file)
#Read, edit then write new DATA file
data.file$styr<-input$styr
data.file$endyr<-input$endyr
data.file$Nages<-Nages()
#Catches
Catch.data<-rv.Ct$data
catch.dep.fleets<-ncol(Catch.data)
data.file$Nfleets<-catch.dep.fleets
if(!is.null(rv.Index$data))
{
index.fleets<-max(rv.Index$data$Fleet)
if(index.fleets>catch.dep.fleets) {data.file$Nfleets<-index.fleets}
if(index.fleets==catch.dep.fleets) {data.file$Nfleets<-index.fleets+1}
if(index.fleets<catch.dep.fleets) {data.file$Nfleets<-catch.dep.fleets}
}
if((data.file$Nfleets-1)>1){
for(i in 1:(data.file$Nfleets-2))
{
data.file$fleetinfo<-rbind(data.file$fleetinfo,data.file$fleetinfo[1,])
data.file$CPUEinfo<-rbind(data.file$CPUEinfo,data.file$CPUEinfo[1,])
}
data.file$fleetinfo$fleetname<-c(paste0("Fishery",1:(catch.dep.fleets-1)),"Depl")
data.file$fleetinfo$type[c(2,data.file$Nfleets)]<-c(1,3)
data.file$fleetinfo$surveytiming[c(2,data.file$Nfleets)]<-c(-1,0.1)
data.file$CPUEinfo[,1]<-1:data.file$Nfleets
data.file$CPUEinfo[c(2,data.file$Nfleets),2]<-c(1,34)
data.file$CPUE$index<-data.file$Nfleets
}
year.in<-Catch.data[,1]
catch.cols<-colnames(data.file$catch)
catch_temp<-list()
for(i in 1:(data.file$Nfleets-1))
{
catch_temp[[i]]<-data.frame(
c(-999,year.in),
rep(1,length(year.in)+1),
rep(i,length(year.in)+1),
c(0,Catch.data[,i+1]),
rep(0.01,length(year.in)+1)
)
}
data.file$catch<-list.rbind(catch_temp)
colnames(data.file$catch)<-catch.cols
#Relative stock status
data.file$CPUE$year<-c(input$styr,input$status_year)
#Length composition data
if(input$Linf_f_mean_sss>30){data.file$binwidth<-2}
data.file$minimum_size<-floor(input$Linf_f_mean_sss/10)
data.file$maximum_size<-ceiling(input$Linf_f_mean_sss+(input$Linf_f_mean_sss*0.1))
data.file$lbin_vector<-seq(data.file$minimum_size,data.file$maximum_size,data.file$binwidth)
data.file$N_lbinspop<-length(data.file$lbin_vector)
#Age composition data
# if (is.null(inFile_age)){
# data.file$N_agebins<-Nages()
# data.file$agebin_vector<-1:Nages()
# data.file$ageerror<-data.frame(matrix(c(rep(-1,(Nages()+1)),rep(0.001,(Nages()+1))),2,(Nages()+1),byrow=TRUE))
# colnames(data.file$ageerror)<-paste0("age",1:Nages())
# }
#Catch units
if(input$Ct_units_choice_SSS)
{
ct.units<-as.numeric(trimws(unlist(strsplit(input$fleet_ct_units_SSS,","))))
#data.file$fleetinfo[ct.units,4]<-2 #use this when just specifying which are fleets are numbers
data.file$fleetinfo[,4]<-c(ct.units,1)
}
SS_writedat(data.file,paste0("Scenarios/",input$Scenario_name,"/sss_example.dat"),overwrite=TRUE)
####################### END DATA FILE #####################################
####################### START SSS CTL FILE #####################################
if(!is.null(input$GT5)){if(input$GT5)
{
ctl.file$N_platoon<-5
ctl.file$sd_ratio<-0.7
ctl.file$submorphdist<-c(-1,0.25,0.5,0.25,0.125)
}
}
#if(all(any(input$est_parms==TRUE,input$est_parms2==FALSE),any(all(!is.null(rv.Lt$data),!is.null(rv.Ct$data)),all(!is.null(rv.Age$data),!is.null(rv.Ct$data))))==TRUE)
#{
fem_vbgf<-VBGF(input$Linf_f_mean_sss,input$k_f_mean_sss,input$t0_f_mean_sss,c(0:Nages()))
#c("lognormal","truncated normal","uniform","beta")
prior.name<-c("no prior","symmetric beta", "beta","lognormal","gamma","normal")
prior.type<-c(0:3,5,6)
#Females
#M
if(input$M_f_prior=="lognormal"){ctl.file$MG_parms[1,3:4]<-c(input$M_f_mean_sss,log(input$M_f_mean_sss))}
else {ctl.file$MG_parms[1,3:4]<-c(input$M_f_mean_sss,input$M_f_mean_sss)}
#L0
ctl.file$Growth_Age_for_L1<-input$t0_f_mean_sss
ctl.file$Growth_Age_for_L1<-0
#if(input$t0_f_prior=="lognormal"){ctl.file$MG_parms[2,3:4]<-c(fem_vbgf[1],log(fem_vbgf[1]))}
#else {ctl.file$MG_parms[2,3:4]<-fem_vbgf[1]}
if(input$t0_f_prior=="lognormal"){ctl.file$MG_parms[2,3:4]<-c(0,log(0.0000001))}
else {ctl.file$MG_parms[2,3:4]<-0}
#Linf
if(input$Linf_f_prior=="lognormal"){ctl.file$MG_parms[3,3:4]<-c(input$Linf_f_mean_sss,log(input$Linf_f_mean_sss))}
else{ctl.file$MG_parms[3,3:4]<-input$Linf_f_mean_sss}
#k
if(input$k_f_prior=="lognormal"){ctl.file$MG_parms[4,3:4]<-c(input$k_f_mean_sss,log(input$k_f_mean_sss))}
else {ctl.file$MG_parms[4,3:4]<-input$k_f_mean_sss}
#CV young
if(input$CV_lt_f_young_prior=="lognormal"){ctl.file$MG_parms[5,3:4]<-c(input$CV_lt_f_young_mean_sss,log(input$CV_lt_f_young_mean_sss))}
else{ctl.file$MG_parms[5,3:4]<-input$CV_lt_f_young_mean_sss}
#CV old
if(input$CV_lt_f_old_prior=="lognormal"){ctl.file$MG_parms[6,3:4]<-c(input$CV_lt_f_old_mean_sss,log(input$CV_lt_f_old_mean_sss))}
else{ctl.file$MG_parms[6,3:4]<-input$CV_lt_f_old_mean_sss}
#Weight-length
ctl.file$MG_parms[7,3:4]<-input$WLa_f_sss #coefficient
ctl.file$MG_parms[8,3:4]<- input$WLb_f_sss #exponent
#Maturity
ctl.file$MG_parms[9,3:4]<-input$L50_f_sss #Lmat50%
ctl.file$MG_parms[10,3:4]<- log(0.05/0.95)/(input$L95_f_sss-input$L50_f_sss) #Maturity slope
#Males
ctl.file$MG_parms[13,3:4]<-c(input$M_f_mean_sss,log(input$M_f_mean_sss)) #M
#ctl.file$MG_parms[14,3:4]<-fem_vbgf[1] #L0
ctl.file$MG_parms[14,3:4]<-0 #L0
ctl.file$MG_parms[15,3:4]<-input$Linf_f_mean_sss #Linf
ctl.file$MG_parms[16,3:4]<-input$k_f_mean_sss #k
ctl.file$MG_parms[17,3:4]<-input$CV_lt_f_young_mean_sss #CV
ctl.file$MG_parms[18,3:4]<-input$CV_lt_f_old_mean_sss #CV
#Weight-length
ctl.file$MG_parms[19,3:4]<-input$WLa_f_sss #coefficient
ctl.file$MG_parms[20,3:4]<- input$WLb_f_sss #exponent
ctl.file$MG_parms[11,3:4]<-input$Fec_a_f_sss #coefficient
ctl.file$MG_parms[12,3:4]<- input$Fec_b_f_sss #exponent
if(input$male_offset_SSS)
{
ctl.file$parameter_offset_approach<-2 #Change to offset approach
ctl.file$MG_parms[13,c(1,3:4)]<-0 #M
ctl.file$MG_parms[14,c(1,3:4)]<-0 #L0
ctl.file$MG_parms[15,c(1,3:4)]<-0 #Linf
ctl.file$MG_parms[16,c(1,3:4)]<-0 #k
ctl.file$MG_parms[17,c(1,3:4)]<-0 #CV
ctl.file$MG_parms[18,c(1,3:4)]<-0 #CV
#Weight-length
ctl.file$MG_parms[19,c(1,3:4)]<-input$WLa_f_sss #coefficient
ctl.file$MG_parms[20,c(1,3:4)]<-input$WLb_f_sss #exponent
}
if(input$male_parms_SSS)
{
male_vbgf_sss<-VBGF(input$Linf_m_mean_sss,input$k_m_mean_sss,input$t0_m_mean_sss,c(input$t0_f_mean_sss:Nages()))
#M
if(input$M_m_prior_sss=="lognormal"){ctl.file$MG_parms[13,3:4]<-c(input$M_m_mean_sss,log(input$M_m_mean_sss))}
else {ctl.file$MG_parms[13,3:4]<-c(input$M_m_mean_sss,input$M_m_mean_sss)}
#L0
if(input$t0_f_prior_sss=="lognormal"){ctl.file$MG_parms[14,3:4]<-c(male_vbgf_sss[1],log(male_vbgf_sss[1]))}
else {ctl.file$MG_parms[14,3:4]<-c(male_vbgf_sss[1],male_vbgf_sss[1])}
# if(input$t0_f_prior_sss=="lognormal"){ctl.file$MG_parms[14,3:4]<-c(0,log(0.0000001))}
#else {ctl.file$MG_parms[14,3:4]<-c(0,0)}
#Linf
if(input$Linf_f_prior_sss=="lognormal"){ctl.file$MG_parms[15,3:4]<-c(input$Linf_m_mean_sss,log(input$Linf_m_mean_sss))}
else{ctl.file$MG_parms[15,3:4]<-c(input$Linf_m_mean_sss,input$Linf_m_mean_sss)}
#k
if(input$k_f_prior_sss=="lognormal"){ctl.file$MG_parms[16,3:4]<-c(input$k_m_mean_sss,log(input$k_m_mean_sss))}
else {ctl.file$MG_parms[16,3:4]<-c(input$k_m_mean_sss,input$k_m_mean_sss)}
#CV young
if(input$CV_lt_f_young_prior_sss=="lognormal"){ctl.file$MG_parms[17,3:4]<-c(input$CV_lt_m_young_mean_sss,log(input$CV_lt_m_young_mean_sss))}
else{ctl.file$MG_parms[17,3:4]<-c(input$CV_lt_m_young_mean_sss,input$CV_lt_m_young_mean_sss)}
#CV old
if(input$CV_lt_f_old_prior_sss=="lognormal"){ctl.file$MG_parms[18,3:4]<-c(input$CV_lt_m_old_mean_sss,log(input$CV_lt_m_old_mean_sss))}
else{ctl.file$MG_parms[18,3:4]<-c(input$CV_lt_m_old_mean_sss,input$CV_lt_m_old_mean_sss)}
#Weight-length
ctl.file$MG_parms[19,3:4]<-input$WLa_m_sss #coefficient
ctl.file$MG_parms[20,3:4]<- input$WLb_m_sss #exponent
}
#S-R
#ctl.file$SR_parms[1,3:4]<-input$lnR0 #lnR0
if(input$h_ss_prior=="lognormal"){ctl.file$SR_parms[2,3:4]<-c(input$h_mean_ss,log(h_mean_ss))}
else{ctl.file$SR_parms[2,3:4]<-input$h_mean_ss}
#}
#
ctl.file$Q_options[1]<-data.file$Nfleets
#Selectivity
Sel50<-as.numeric(trimws(unlist(strsplit(input$Sel50_sss,","))))
Selpeak<-as.numeric(trimws(unlist(strsplit(input$Selpeak_sss,","))))
bin.width<-data.file$lbin_vector[2]-data.file$lbin_vector[1]
if(input$Sel_choice_sss=="Logistic")
{
#ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width)
ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
ctl.file$size_selex_parms[1,3:4]<- Selpeak[1]
ctl.file$size_selex_parms[2,3:4]<- 15
ctl.file$size_selex_parms[3,3:4]<- log(-((Sel50[1]-Selpeak[1])^2/log(0.5)))
ctl.file$size_selex_parms[4,3:4]<- -15
ctl.file$size_selex_parms[6,3:4]<- 15
}
if(input$Sel_choice_sss=="Dome-shaped")
{
PeakDesc<-as.numeric(trimws(unlist(strsplit(input$PeakDesc_sss,","))))
LtPeakFinal<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal_sss,","))))
FinalSel<-as.numeric(trimws(unlist(strsplit(input$FinalSel_sss,","))))
#ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width)
ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
ctl.file$size_selex_parms[1,3:4]<- Selpeak[1]
ctl.file$size_selex_parms[2,3:4]<- -log((max(data.file$lbin_vector)-Selpeak[1]-bin.width)/(PeakDesc[1]-Selpeak[1]-bin.width))
ctl.file$size_selex_parms[3,3:4]<- log(-((Sel50[1]-Selpeak[1])^2/log(0.5)))
ctl.file$size_selex_parms[4,3:4]<- log(LtPeakFinal[1])
ctl.file$size_selex_parms[6,3:4]<- -log((1/(FinalSel[1]+0.000000001)-1))
}
#Add other fleets
if((data.file$Nfleets-1)>1){
for(i in 1:(data.file$Nfleets-2))
{
#ctl.file$init_F<-rbind(ctl.file$init_F,ctl.file$init_F[1,])
ctl.file$size_selex_types<-rbind(ctl.file$size_selex_types,ctl.file$size_selex_types[1,])
ctl.file$age_selex_types<-rbind(ctl.file$age_selex_types,ctl.file$age_selex_types[1,])
ctl.file$size_selex_parms<-rbind(ctl.file$size_selex_parms,ctl.file$size_selex_parms[1:6,])
if(input$Sel_choice_sss=="Logistic")
{
#ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width)
ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
ctl.file$size_selex_parms[6*i+1,3:4]<- Selpeak[i+1]
ctl.file$size_selex_parms[6*i+2,3:4]<- 15
ctl.file$size_selex_parms[6*i+3,3:4]<- log(-((Sel50[i+1]-Selpeak[i+1])^2/log(0.5)))
ctl.file$size_selex_parms[6*i+4,3:4]<- -15
ctl.file$size_selex_parms[6*i+6,3:4]<- 15
}
if(input$Sel_choice_sss=="Dome-shaped")
{
ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
ctl.file$size_selex_parms[6*i+1,3:4]<- Selpeak[i+1]
ctl.file$size_selex_parms[6*i+2,3:4]<- -log((max(data.file$lbin_vector)-Selpeak[i+1]-bin.width)/(PeakDesc[i+1]-Selpeak[i+1]-bin.width))
ctl.file$size_selex_parms[6*i+3,3:4]<- log(-((Sel50[i+1]-Selpeak[i+1])^2/log(0.5)))
ctl.file$size_selex_parms[6*i+4,3:4]<- log(LtPeakFinal[i+1])
ctl.file$size_selex_parms[6*i+6,3:4]<- -log((1/(FinalSel[i+1]+0.000000001)-1))
}
}
ctl.file$size_selex_types[,1]<-c(rep(24,data.file$Nfleets-1),0)
ctl.file$age_selex_types[,1]<-10
#Re-label so r4ss can interpret these new entries
#rownames(ctl.file$init_F)<-paste0("InitF_seas_1_flt_",1:data.file$Nfleets,"Fishery",1:data.file$Nfleets)
rownames(ctl.file$age_selex_types)<-rownames(ctl.file$size_selex_types)<-c(paste0("Fishery",1:(data.file$Nfleets-1)),"Depl")
size_selex_parms_rownames<-list()
for(f_i in 1:(data.file$Nfleets-1))
{
size_selex_parms_rownames[[f_i]]<-c(paste0("SizeSel_P_1_Fishery",f_i,"(",f_i,")"),
paste0("SizeSel_P_2_Fishery",f_i,"(",f_i,")"),
paste0("SizeSel_P_3_Fishery",f_i,"(",f_i,")"),
paste0("SizeSel_P_4_Fishery",f_i,"(",f_i,")"),
paste0("SizeSel_P_5_Fishery",f_i,"(",f_i,")"),
paste0("SizeSel_P_6_Fishery",f_i,"(",f_i,")"))
}
size_selex_parms_rownames<-unlist(size_selex_parms_rownames)
rownames(ctl.file$size_selex_parms)<-size_selex_parms_rownames
}
#Write the modified control file for the SSS scenario
SS_writectl(ctl.file,paste0("Scenarios/",input$Scenario_name,"/sss_example.ctl"),overwrite=TRUE)
#Forecast file modifications
#Reference points
#if(!input$use_forecastnew)
#{
forecast.file<-SS_readforecast(paste0("Scenarios/",input$Scenario_name,"/forecast.ss"))
#User-specified reference points and harvest control rule
if(input$RP_choices){
forecast.file$SPRtarget<-input$SPR_target
forecast.file$Btarget<-input$B_target
#Map the control-rule UI label back to its SS3 numeric code (1-4)
CR_choices<-c("1: Catch fxn of SSB, buffer on F",
"2: F fxn of SSB, buffer on F",
"3: Catch fxn of SSB, buffer on catch",
"4: F fxn of SSB, buffer on catch")
CR_choices_num.vec<-c(1:4)
forecast.file$ControlRuleMethod<-CR_choices_num.vec[CR_choices==input$CR_Ct_F]
forecast.file$SBforconstantF<-input$slope_hi
forecast.file$BfornoF<-input$slope_low
}
#Forecast years and catch/F buffer fraction(s)
if(input$Forecast_choice)
{
forecast.file$Nforecastyrs<-input$forecast_num
#Buffer input is a comma-separated string: one value = constant fraction,
#multiple values = one fraction per forecast year
buffer.in<-as.numeric(trimws(unlist(strsplit(input$forecast_buffer,","))))
if(length(buffer.in)==1){forecast.file$Flimitfraction<-buffer.in}
if(length(buffer.in)>1)
{
#Flimitfraction of -1 tells SS3 that year-specific fractions follow
forecast.file$Flimitfraction<--1
buffer.datafr<-data.frame(Year=c((data.file$endyr+1):(data.file$endyr+input$forecast_num)),Fraction=buffer.in)
rownames(buffer.datafr)<-paste0("#_Flimitfraction_m",1:input$forecast_num)
forecast.file$Flimitfraction_m<-buffer.datafr
}
}
SS_writeforecast(forecast.file,paste0("Scenarios/",input$Scenario_name),overwrite=TRUE)
#}
#if(input$use_forecastnew)
# {
# forecast.file<-SS_readforecast(paste0("Scenarios/",input$Scenario_name,"/forecast.ss_new"))
# SS_writeforecast(forecast.file,paste0("Scenarios/",input$Scenario_name),overwrite=TRUE)
# }
#Set prior inputs
#SSS prior type codes:
#0 = normal
#10 = truncated normal
#1 = symmetric beta (rbeta)
#2 = beta
#3 = lognormal
#30 = truncated lognormal
#4 = uniform
#99 = used only for the steepness parameter. Indicates h will come from FMSY/M prior
#Parallel vectors mapping the UI prior-name labels to the numeric codes above
sss.prior.name<-c("no prior","symmetric beta","beta","normal","truncated normal","lognormal","truncated lognormal","uniform")
sss.prior.type<-c(-1,1,2,0,10,3,30,4)
#Depletion and steepness priors: c(type, mean, SD)
Dep.in_sss<-c(sss.prior.type[sss.prior.name==input$Depl_prior_sss],input$Depl_mean_sss,input$Depl_SD_sss)
h.in_sss<-c(sss.prior.type[sss.prior.name==input$h_prior_sss],input$h_mean_sss,input$h_SD_sss)
#Biology priors are c(female type, mean, SD, male type, mean, SD).
#No offset: males duplicate the female inputs.
if(!input$male_offset_SSS)
{
M.in_sss<-c(sss.prior.type[sss.prior.name==input$M_prior_sss],input$M_f_mean_sss,input$M_f_SD_sss,sss.prior.type[sss.prior.name==input$M_prior_sss],input$M_f_mean_sss,input$M_f_SD_sss)
Linf.in_sss<-c(sss.prior.type[sss.prior.name==input$Linf_f_prior_sss],input$Linf_f_mean_sss,input$Linf_f_SD_sss,sss.prior.type[sss.prior.name==input$Linf_f_prior_sss],input$Linf_f_mean_sss,input$Linf_f_SD_sss)
k.in_sss<-c(sss.prior.type[sss.prior.name==input$k_f_prior_sss],input$k_f_mean_sss,input$k_f_SD_sss,sss.prior.type[sss.prior.name==input$k_f_prior_sss],input$k_f_mean_sss,input$k_f_SD_sss)
t0.in_sss<-c(sss.prior.type[sss.prior.name==input$t0_f_prior_sss],input$t0_f_mean_sss,input$t0_f_SD_sss,sss.prior.type[sss.prior.name==input$t0_f_prior_sss],input$t0_f_mean_sss,input$t0_f_SD_sss)
}
#Offset approach: male mean/SD entries are 0 (offsets from the female values)
if(input$male_offset_SSS)
{
M.in_sss<-c(sss.prior.type[sss.prior.name==input$M_prior_sss],input$M_f_mean_sss,input$M_f_SD_sss,sss.prior.type[sss.prior.name==input$M_prior_sss],0,0)
Linf.in_sss<-c(sss.prior.type[sss.prior.name==input$Linf_f_prior_sss],input$Linf_f_mean_sss,input$Linf_f_SD_sss,sss.prior.type[sss.prior.name==input$Linf_f_prior_sss],0,0)
k.in_sss<-c(sss.prior.type[sss.prior.name==input$k_f_prior_sss],input$k_f_mean_sss,input$k_f_SD_sss,sss.prior.type[sss.prior.name==input$k_f_prior_sss],0,0)
t0.in_sss<-c(sss.prior.type[sss.prior.name==input$t0_f_prior_sss],input$t0_f_mean_sss,input$t0_f_SD_sss,sss.prior.type[sss.prior.name==input$t0_f_prior_sss],0,0)
}
#Male-specific priors: used when the user supplies separate male parameter
#inputs. Overrides the vectors built above. Each *.in_sss vector is
#c(female prior type, mean, SD, male prior type, mean, SD).
if(input$male_parms_SSS)
{
M.in_sss<-c(sss.prior.type[sss.prior.name==input$M_prior_sss],input$M_f_mean_sss,input$M_f_SD_sss,sss.prior.type[sss.prior.name==input$M_m_prior_sss],input$M_m_mean_sss,input$M_m_SD_sss)
#Fixed: the male Linf SD previously reused the female SD input
#(input$Linf_f_SD_sss); use the male SD input, matching M, k and t0 below.
Linf.in_sss<-c(sss.prior.type[sss.prior.name==input$Linf_f_prior_sss],input$Linf_f_mean_sss,input$Linf_f_SD_sss,sss.prior.type[sss.prior.name==input$Linf_m_prior_sss],input$Linf_m_mean_sss,input$Linf_m_SD_sss)
k.in_sss<-c(sss.prior.type[sss.prior.name==input$k_f_prior_sss],input$k_f_mean_sss,input$k_f_SD_sss,sss.prior.type[sss.prior.name==input$k_m_prior_sss],input$k_m_mean_sss,input$k_m_SD_sss)
t0.in_sss<-c(sss.prior.type[sss.prior.name==input$t0_f_prior_sss],input$t0_f_mean_sss,input$t0_f_SD_sss,sss.prior.type[sss.prior.name==input$t0_m_prior_sss],input$t0_m_mean_sss,input$t0_m_SD_sss)
}
#Busy indicator while the SSS sampling runs (can be long for many reps)
show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[2],text="Model run in progress")
#Run SSS
#Launch Simple Stock Synthesis with the prior vectors assembled above.
#SR_type=3 = Beverton-Holt; FMSY_M.in/BMSY_B0.in/Zfrac.Beta.in are left at
#their defaults (type -1/-99 = not used) — TODO confirm against SSS() docs.
SSS.out<-SSS(paste0("Scenarios/",input$Scenario_name),
file.name=c("sss_example.dat","sss_example.ctl"),
reps=input$SSS_reps,
seed.in=input$SSS_seed,
Dep.in=Dep.in_sss,
M.in=M.in_sss,
SR_type=3,
h.in=h.in_sss,
FMSY_M.in=c(-1,0.5,0.1),
BMSY_B0.in=c(-1,0.5,0.1),
Linf.k.cor=input$Linf_k_cor_sss,
Linf.in=Linf.in_sss,
k.in=k.in_sss,
t0.in=t0.in_sss,
Zfrac.Beta.in=c(-99,0.2,0.6,-99,0.5,2),
R_start=c(0,input$lnR0_sss),
#Search lnR0 over [0.5x, 1.5x] the user value in 10 steps
doR0.loop=c(1,round(input$lnR0_sss*0.5),round(input$lnR0_sss*1.5),(round(input$lnR0_sss*1.3)-round(input$lnR0_sss*0.5))/10),
sum_age=0,
ts_yrs=c(input$styr,input$endyr),
pop.ltbins=NA,
#ofl_yrs=c(input$endyr+1,input$endyr+2),
sexes=T,
BH_FMSY_comp=F,
OStype=input$OS_choice)
#save(SSS.out)
#save(SSS.out)
show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[3],text="Process model output")
if(exists(load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))))
{
output$SSS_priors_post<-renderPlot({
if(exists(load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))))
{
load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))
sss.M.f<-rbind(data.frame(value=SSS.out$Prior$M_f,type="prior",metric="Female M"),data.frame(value=SSS.out$Post$M_f,type="post",metric="Female M"))
sss.M.m<-rbind(data.frame(value=SSS.out$Prior$M_m,type="prior",metric="Male M"),data.frame(value=SSS.out$Post$M_m,type="post",metric="Male M"))
sss.h<-rbind(data.frame(value=SSS.out$Prior$h,type="prior",metric="h"),data.frame(value=SSS.out$Post$h,type="post",metric="h"))
sss.Dep<-rbind(data.frame(value=SSS.out$Prior$Dep,type="prior",metric="Dep"),data.frame(value=SSS.out$Post$Dep.Obs,type="post",metric="Dep"))
sss.vals.out<-rbind(sss.M.f,sss.M.m,sss.h,sss.Dep)
ggplot(sss.vals.out,aes(x=value,color=type,fill=type))+
geom_histogram(position="dodge",alpha=0.5)+
theme(legend.position="bottom")+
theme(legend.title=element_blank())+
facet_grid(~metric,scales = "free")
# Mf.plot<-ggplot(sss.M.f,aes(x=value,color=type))+geom_histogram(position="dodge",alpha=0.5,fill="white")
# Mm.plot<-ggplot(sss.M.m,aes(x=value,color=type))+geom_histogram(position="dodge",alpha=0.5,fill="white")
# h.plot<-ggplot(sss.h,aes(x=value,color=type))+geom_histogram(position="dodge",alpha=0.5,fill="white")
# Dep.plot<-ggplot(sss.Dep,aes(x=value,color=type))+geom_histogram(position="dodge",alpha=0.5,fill="white")
}
else{return(NULL)}
})
output$SSS_growth_priors_post<-renderPlot({
if(exists(load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))))
{
load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))
sss.L1_f<-rbind(data.frame(value=SSS.out$Prior$L1_f,type="prior",metric="Female L1"),data.frame(value=SSS.out$Post$L1_f,type="post",metric="Female L1"))
sss.Linf_f<-rbind(data.frame(value=SSS.out$Prior$Linf_f,type="prior",metric="Female Linf"),data.frame(value=SSS.out$Post$Linf_f,type="post",metric="Female Linf"))
sss.k_f<-rbind(data.frame(value=SSS.out$Prior$k_f,type="prior",metric="Female k"),data.frame(value=SSS.out$Post$k_f,type="post",metric="Female k"))
sss.L1_m<-rbind(data.frame(value=SSS.out$Prior$L1_m,type="prior",metric="Male L1"),data.frame(value=SSS.out$Post$L1_m,type="post",metric="Male L1"))
sss.Linf_m<-rbind(data.frame(value=SSS.out$Prior$Linf_m,type="prior",metric="Male Linf"),data.frame(value=SSS.out$Post$Linf_m,type="post",metric="Male Linf"))
sss.k_m<-rbind(data.frame(value=SSS.out$Prior$k_m,type="prior",metric="Male k"),data.frame(value=SSS.out$Post$k_m,type="post",metric="Male k"))
sss.vals.growth.out<-rbind(sss.L1_f,sss.Linf_f,sss.k_f,sss.L1_m,sss.Linf_m,sss.k_m)
ggplot(sss.vals.growth.out,aes(x=value,color=type,fill=type))+
geom_histogram(position="dodge",alpha=0.5)+
theme(legend.position="bottom")+
theme(legend.title=element_blank())+
facet_wrap(~metric,scales = "free")
}
else{return(NULL)}
})
output$SSS_OFL_plot<-renderPlot({
if(exists(load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))))
{
load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))
ofl.years<-as.numeric(unique(melt(SSS.out$OFL)$Var2))
ggplot(melt(SSS.out$OFL),aes(Var2,value,group=Var2))+
geom_boxplot(fill="#236192")+
scale_x_continuous(breaks=ofl.years,labels=as.character(ofl.years))+
ylab("OFL (mt)")+
xlab("Year")
}
else{return(NULL)}
})
output$SSS_ABC_plot<-renderPlot({
if(exists(load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))))
{
load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))
abc.years<-as.numeric(unique(melt(SSS.out$ABC)$Var2))
ggplot(melt(SSS.out$ABC),aes(Var2,value,group=Var2))+
geom_boxplot(fill="#658D1B")+
scale_x_continuous(breaks=abc.years,labels=as.character(abc.years))+
ylab("ABC (mt)")+
xlab("Year")
}
else{return(NULL)}
})
}
remove_modal_spinner()
})
###############
### END SSS ###
###############
##################################################################
### PREPARE FILES and RUN Length and Age-based Stock Synthsis ###
##################################################################
#Triggered by the "Run SS" button: stages the scenario directory, reads the
#SS data/control files, edits them from the UI inputs, then runs the model.
#(Closure continues past this section.)
SS.file.update<-observeEvent(input$run_SS,{
# if(is.null(inFile) | !anyNA(inp$
# styr,ndyr,
# input$Nages,
# input$M_f,
# input$k_f,
# input$Linf_f,
# input$t0_f,
# input$L50_f,
# input$L95_f,
# input$M_m,
# input$k_m,
# input$Linf_m,
# input$t0_m,
# input$L50_m,
# input$L95_m,
# ))
# {
#Switch the UI to the results tab while the run proceeds
updateTabsetPanel(session, "tabs",
selected = '1')
# progress <- shiny::Progress$new(session, min=1, max=2)
# on.exit(progress$close())
# progress$set(message = 'Model run in progress',
# detail = '')
# for (i in 1:2) {
# progress$set(value = i)
# Sys.sleep(0.5)
# }
#Fresh run from template files: only when no ss_new/par/user-model reuse is requested
if(!any(input$use_par,input$use_datanew,input$use_controlnew,input$user_model))
#if(which(c(input$use_par,input$use_datanew,input$use_datanew_user,input$use_controlnew,input$use_controlnew_user,input$user_model))!=0)
{
#Copy and move files
#Delete any previous run of the same scenario, then seed the scenario
#directory from the appropriate template set
if(file.exists(paste0("Scenarios/",input$Scenario_name)))
{
unlink(paste0("Scenarios/",input$Scenario_name),recursive=TRUE) #Deletes previous run
# file.remove(paste0(getwd(),"/Scenarios/",input$Scenario_name))
}
#"Estimate F" with no catch data uses the length-only (LO) F template
if(input$Ct_F_LO_select=="Estimate F" & is.null(rv.Ct$data)){
file.copy(paste0("SS_LO_F_files"),paste0("Scenarios"),recursive=TRUE,overwrite=TRUE)
file.rename(paste0("Scenarios/SS_LO_F_files"), paste0("Scenarios/",input$Scenario_name))
}
else{
file.copy(paste0("SS_LB_files"),paste0("Scenarios"),recursive=TRUE,overwrite=TRUE)
file.rename(paste0("Scenarios/SS_LB_files"), paste0("Scenarios/",input$Scenario_name))
}
}
# if(!input$use_customfile)
# {
# }
#Read data and control files
if(!input$user_model)
{
data.file<-SS_readdat(paste0("Scenarios/",input$Scenario_name,"/datafile.dat"))
ctl.file<-SS_readctl(paste0("Scenarios/",input$Scenario_name,"/controlfile.ctl"),use_datlist = TRUE, datlist=data.file)
}
#Optionally reuse SS3-echoed files from a previous run
if(input$use_datanew)
{
data.file<-SS_readdat(paste0("Scenarios/",input$Scenario_name,"/data_echo.ss_new"))
}
if(input$use_controlnew)
{
data.file<-SS_readdat(paste0("Scenarios/",input$Scenario_name,"/data_echo.ss_new"))
ctl.file<-SS_readctl(paste0("Scenarios/",input$Scenario_name,"/control.ss_new"),use_datlist = TRUE, datlist=data.file)
}
# data.file<-SS_readdat(paste0(getwd(),"/Scenarios/",input$Scenario_name,"/SS_LB.dat"))
# ctl.file<-SS_readctl(paste0(getwd(),"/Scenarios/",input$Scenario_name,"/SS_LB.ctl"),use_datlist = TRUE, datlist=data.file)
#if(input$Ct_F_LO_select=="Estimate F" & is.null(rv.Ct$data))
# {
# data.file<-SS_readdat(paste0(getwd(),"/Scenarios/",input$Scenario_name,"/SS_LB.dat"))
# ctl.file<-SS_readctl(paste0(getwd(),"/Scenarios/",input$Scenario_name,"/SS_LB.ctl"),use_datlist = TRUE, datlist=data.file)
# }
if(!input$user_model)
{
#Prepare inputs to evaluate any errors
#Selectivity inputs arrive as comma-separated strings, one value per fleet
Sel50<-as.numeric(trimws(unlist(strsplit(input$Sel50,","))))
Sel50_phase<-as.numeric(trimws(unlist(strsplit(input$Sel50_phase,","))))
Selpeak<-as.numeric(trimws(unlist(strsplit(input$Selpeak,","))))
Selpeak_phase<-as.numeric(trimws(unlist(strsplit(input$Selpeak_phase,","))))
bin.width<-data.file$lbin_vector[2]-data.file$lbin_vector[1]
#Distance from the first fleet's peak to the nearest edge of the bin range
minmaxbin<-min(Selpeak[1]-min(data.file$lbin_vector),max(data.file$lbin_vector)-Selpeak[1])
#sel.inputs.comps<-length(Sel50)-length(Sel50_phase)-length(Selpeak)-length(Selpeak_phase)
#Lengths of each selectivity input vector; all must equal Nfleets
sel.inputs.lts<-c(length(Sel50),length(Sel50_phase),length(Selpeak),length(Selpeak_phase))
#Total fleets = max fleet number referenced by catch, length, age or index data
Nfleets<-max(ncol(rv.Ct$data)-1,rv.Lt$data[,3],rv.Age$data[,3],rv.Index$data[,3])
if(input$Sel_choice=="Dome-shaped")
{
#Dome-shaped needs three extra parameter sets (descent start, width, final sel)
PeakDesc<-as.numeric(trimws(unlist(strsplit(input$PeakDesc,","))))
PeakDesc_phase<-as.numeric(trimws(unlist(strsplit(input$PeakDesc_phase,","))))
LtPeakFinal<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal,","))))
LtPeakFinal_phase<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal_phase,","))))
FinalSel<-as.numeric(trimws(unlist(strsplit(input$FinalSel,","))))
FinalSel_phase<-as.numeric(trimws(unlist(strsplit(input$FinalSel_phase,","))))
minmaxbin<-min(Selpeak[1]-min(data.file$lbin_vector),max(data.file$lbin_vector)-Selpeak[1])
sel.inputs.lts<-c(length(Sel50),length(Sel50_phase),length(Selpeak),length(Selpeak_phase),length(PeakDesc),length(PeakDesc_phase),length(LtPeakFinal),length(LtPeakFinal_phase),length(FinalSel),length(FinalSel_phase))
}
#Search for errors in inputs
#Throw warning if not enough selectivity inputs
#Abort with a modal alert when any selectivity input vector (values or phases)
#does not have one entry per fleet (fishing fleets + surveys).
if(!all(Nfleets==sel.inputs.lts))
{
#Throw warning if not enough selectivity inputs
sendSweetAlert(
session = session,
title = "Selectivity input warning",
#Message grammar fixed ("provided filled in" -> "filled in")
text = "Please check to see if you have filled in the inputs correctly. Especially check selectivity for missing fleets (both in parameter and phases). Total fleets includes fishing fleets and surveys.",
type = "error")
remove_modal_spinner()
}
#Inputs validated: proceed with building the model files
if(all(Nfleets==sel.inputs.lts))
{
checkmod<-1 #add object to verify no errors in inputs and model can be run
show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[2],text="Model run in progress")
if(!input$use_par)
{
if(all(!input$use_datanew,!input$user_model))
{
#Read, edit then write new DATA file
data.file$styr<-input$styr
data.file$endyr<-input$endyr
data.file$Nages<-Nages()
#Number of fleets with catches: from catch columns, else from length-comp fleets
if(!is.null(rv.Ct$data)){catch.fleets<-max(ncol(rv.Ct$data)-1)}
if(all(!is.null(rv.Lt$data),is.null(rv.Ct$data))){catch.fleets<-max(rv.Lt$data[,3])}
data.file$Nfleets<-max(catch.fleets,rv.Lt$data[,3],rv.Age$data[,3],rv.Index$data[,3])
#########
#Catches#
#########
#No catch data: length-only model with dummy catches; F handled as bycatch
#fleet(s) when "Estimate F" is selected
if (is.null(rv.Ct$data))
{
#inFile<- rv.Lt$data
Lt.comp.data<-rv.Lt$data
Age.comp.data<- rv.Age$data
#data.file$Nfleets<-max(Lt.comp.data[,2],Age.comp.data[,2])
if(input$Ct_F_LO_select=="Estimate F"){data.file$bycatch_fleet_info[4:5]<-c(input$styr,input$endyr)}
if(data.file$Nfleets>1){
#Replicate the bycatch info row for each additional fleet
for(i in 1:(data.file$Nfleets-1))
{
if(input$Ct_F_LO_select=="Estimate F"){data.file$bycatch_fleet_info<-rbind(data.file$bycatch_fleet_info,data.file$bycatch_fleet_info[1,])}
}
if(input$Ct_F_LO_select=="Estimate F"){data.file$bycatch_fleet_info[,1]<-c(1:data.file$Nfleets)}
}
year.in<-input$styr:input$endyr
catch.cols<-colnames(data.file$catch)
catch_temp<-list()
#Dummy catch of 1000 split across fleets by the user-provided weights
if(catch.fleets==1){catch.level<-1000}
if(catch.fleets>1){
catch.level<-as.numeric(trimws(unlist(strsplit(input$Wt_fleet_Ct,","))))
catch.level<-catch.level/sum(catch.level)*1000
}
#One block of rows per fleet; leading year -999 is the SS3 equilibrium catch
for(i in 1:catch.fleets)
{
catch_temp[[i]]<-data.frame(
c(-999,year.in),
rep(1,length(year.in)+1),
rep(i,length(year.in)+1),
c(catch.level[i],rep(catch.level[i],length(year.in))),
c(0.01,rep(1000,length(year.in)))
)
}
data.file$catch<-list.rbind(catch_temp)
colnames(data.file$catch)<-catch.cols
}
#User-supplied catches: col 1 = year, cols 2+ = catch by fleet
if(!is.null(rv.Ct$data))
{
Catch.data<-rv.Ct$data
#data.file$Nfleets<-max(ncol(Catch.data)-1,data.file$Nfleets)
year.in<-Catch.data[,1]
catch.cols<-colnames(data.file$catch)
catch_temp<-list()
for(i in 1:catch.fleets)
{
#Equilibrium (-999) catch set to ~0; catch SE fixed at 0.01
catch_temp[[i]]<-data.frame(
c(-999,year.in),
rep(1,length(year.in)+1),
rep(i,length(year.in)+1),
c(0.00000000000000000001,Catch.data[,i+1]),
rep(0.01,length(year.in)+1)
)
}
data.file$catch<-list.rbind(catch_temp)
colnames(data.file$catch)<-catch.cols
}
#Index data
#Expected upload columns: year, season, fleet, observation, log-SE
if (!is.null(rv.Index$data)) {
Index.data<-rv.Index$data
data.file$N_cpue<-unique(rv.Index$data[,3])
data.file$CPUE<-data.frame(year=rv.Index$data[,1],seas=rv.Index$data[,2],index=rv.Index$data[,3],obs=rv.Index$data[,4],se_log=rv.Index$data[,5])
}
#########################
#Length composition data#
#########################
#Population length data bins
#Defaults (2 cm bins from 2 cm) are derived from Linf, then overridden by the
#uploaded length-comp bins and/or the advanced UI bin settings below
data.file$binwidth<-2
if(!is.null(rv.Lt$data)){data.file$binwidth<-as.numeric(colnames(rv.Lt$data)[7])-as.numeric(colnames(rv.Lt$data)[6])}
data.file$minimum_size<-2
if(!is.null(rv.Lt$data)){data.file$minimum_size<-as.numeric(colnames(rv.Lt$data)[6])}
#Max bin: Linf + 25%, rounded to the nearest even number, plus one 2-cm bin
max.bin.in<-2*(round((Linf()+(Linf()*0.25))/2))+2 #0.2326
data.file$maximum_size<-max.bin.in
# if(input$advance_ss_click)
# {
#Advanced UI bin inputs take precedence
data.file$binwidth<-input$lt_bin_size
data.file$minimum_size<-input$lt_min_bin
data.file$maximum_size<-input$lt_max_bin
# }
#inFile<- rv.Lt$data
#No length comps: keep defaults and clear the lencomp table
if (is.null(rv.Lt$data)) {
if(input$est_parms==FALSE){Linf_bins<-input$Linf_f_fix}
if(input$est_parms==TRUE){Linf_bins<-input$Linf_f_mean}
data.file$binwidth<-2
data.file$minimum_size<-2
max.bin.in<-2*(round((Linf()+(Linf()*0.25))/2))+2 #0.2326
data.file$maximum_size<-max.bin.in
data.file$lbin_vector<-seq(data.file$minimum_size,data.file$maximum_size,data.file$binwidth)
data.file$N_lbins<-length(data.file$lbin_vector)
data.file$lencomp<-NULL
}
#Build the SS3 length-composition table from the uploaded length data.
#Upload format: cols 1-5 = Year, Month, Fleet, Sex (0/1/2), Nsamps; cols 6+ =
#counts by length bin (bin values in the column names).
#SS3 lencomp format: Yr, Month, Fleet, Sex, Part, Nsamp, female bins, male bins.
if (!is.null(rv.Lt$data)) {
Lt.comp.data<-rv.Lt$data
data.file$N_lbins<-ncol(Lt.comp.data)-5
data.file$lbin_vector<-as.numeric(colnames(rv.Lt$data)[6:ncol(rv.Lt$data)]) #as.numeric(colnames(Lt.comp.data[,5:ncol(Lt.comp.data)]))
#Extend the population max size if the data bins exceed it
if(data.file$maximum_size<max(data.file$lbin_vector)){data.file$maximum_size<-(2*round(max(data.file$lbin_vector)/2))+2}
lt.data.names<-c(colnames(data.file$lencomp[,1:6]),paste0("f",data.file$lbin_vector),paste0("m",data.file$lbin_vector))
#1-row NA placeholders; unused sex groups are removed by na.omit() at the end
lt.data.females<-lt.data.males<-lt.data.unknowns<-lt.data.sex3<-data.frame(matrix(rep(NA,length(lt.data.names)),nrow=1))
colnames(Lt.comp.data)[1:5]<-c("Year","Month","Fleet","Sex","Nsamps")
#female lengths: counts in the "f" bins, zeros in the "m" bins
if(nrow(subset(Lt.comp.data,Sex==1))>0){
Lt.comp.data_female<-subset(Lt.comp.data,Sex==1 & Nsamps>0)
samp.yrs<-Lt.comp.data_female[,1]
lt.data.females<-data.frame(cbind(samp.yrs,
Lt.comp.data_female[,2],
Lt.comp.data_female[,3],
Lt.comp.data_female[,4],
rep(0,length(samp.yrs)),
Lt.comp.data_female[,5],
Lt.comp.data_female[,6:ncol(Lt.comp.data_female)],
Lt.comp.data_female[,6:ncol(Lt.comp.data_female)]*0)
)
}
#male lengths: zeros in the "f" bins, counts in the "m" bins
if(nrow(subset(Lt.comp.data,Sex==2))>0){
Lt.comp.data_male<-subset(Lt.comp.data,Sex==2 & Nsamps>0)
samp.yrs_males<-Lt.comp.data_male[,1]
lt.data.males<-data.frame(cbind(samp.yrs_males,
Lt.comp.data_male[,2],
Lt.comp.data_male[,3],
Lt.comp.data_male[,4],
rep(0,length(samp.yrs_males)),
Lt.comp.data_male[,5],
Lt.comp.data_male[,6:ncol(Lt.comp.data_male)]*0,
Lt.comp.data_male[,6:ncol(Lt.comp.data_male)])
)
}
#unknown sex lengths: counts go in the "f" bins with Sex=0 (SS3 convention)
if(nrow(subset(Lt.comp.data,Sex==0))>0){
Lt.comp.data_unknown<-subset(Lt.comp.data,Sex==0 & Nsamps>0)
samp.yrs_unknown<-Lt.comp.data_unknown[,1]
lt.data.unknowns<-data.frame(cbind(samp.yrs_unknown,
Lt.comp.data_unknown[,2],
Lt.comp.data_unknown[,3],
Lt.comp.data_unknown[,4],
rep(0,length(samp.yrs_unknown)),
Lt.comp.data_unknown[,5],
Lt.comp.data_unknown[,6:ncol(Lt.comp.data_unknown)],
Lt.comp.data_unknown[,6:ncol(Lt.comp.data_unknown)]*0)
)
}
#Maintain sample sex ratio: merge female and male rows from the same
#year+fleet into single Sex=3 (both sexes) observations.
#NOTE(review): this branch assumes both Sex==1 and Sex==2 rows exist above;
#if either subset is empty, Lt.comp.data_female/_male are undefined here.
if(input$Sex3){
yrsfleet_females<-paste0(Lt.comp.data_female[,1],Lt.comp.data_female[,3])
yrsfleet_males<-paste0(Lt.comp.data_male[,1],Lt.comp.data_male[,3])
#Match years
#samp.yrs_sex3<-samp.yrs_females[match(samp.yrs_males,samp.yrs_females)]
sex3_match_female<-yrsfleet_females%in%yrsfleet_males
sex3_match_male<-yrsfleet_males%in%yrsfleet_females
#Subset years
Lt.comp.data_female_sex3<-Lt.comp.data_female[sex3_match_female,]
Lt.comp.data_male_sex3<-Lt.comp.data_male[sex3_match_male,]
lt.data.sex3<-data.frame(cbind(Lt.comp.data_female_sex3[,1],
Lt.comp.data_female_sex3[,2],
Lt.comp.data_female_sex3[,3],
rep(3,nrow(Lt.comp.data_female_sex3)),
rep(0,nrow(Lt.comp.data_female_sex3)),
#Fixed: Nsamp is female Nsamps + male Nsamps (col 5 of each).
#Previously summed male column 4 (the Sex code, always 2).
Lt.comp.data_female_sex3[,5]+Lt.comp.data_male_sex3[,5],
Lt.comp.data_female_sex3[,6:ncol(Lt.comp.data_female_sex3)],
Lt.comp.data_male_sex3[,6:ncol(Lt.comp.data_male_sex3)])
)
#Drop the single-sex rows that were merged into Sex=3 rows
lt.data.females<-lt.data.females[!sex3_match_female,]
lt.data.males<-lt.data.males[!sex3_match_male,]
}
colnames(lt.data.females)<-colnames(lt.data.males)<-colnames(lt.data.unknowns)<-colnames(lt.data.sex3)<-lt.data.names
data.file$lencomp<-na.omit(rbind(lt.data.unknowns,lt.data.females,lt.data.males,lt.data.sex3))
}
#}
#else{
# data.file$lencomp<-data.frame(matrix(cbind(samp.yrs,
# rep(1,length(samp.yrs)),
# rep(1,length(samp.yrs)),
# rep(1,length(samp.yrs)),
# rep(0,length(samp.yrs)),
# colSums(Lt.comp.data[-1]),
# t(Lt.comp.data)[-1,],
# t(Lt.comp.data)[-1,]*0),
# nrow=length(samp.yrs),
# ncol=6+length(Lt.comp.data[,1])*2,
# byrow=FALSE))[,,drop=FALSE]
# }
# colnames(data.file$lencomp)<-lt.data.names
######################
#Age composition data#
######################
#Upload format: cols 1-8 = Year, Month, Fleet, Sex, AgeErr, Lbin_low, Lbin_hi,
#Nsamps; cols 9+ = counts by age bin (bin values in the column names).
Age.comp.data<-rv.Age$data
#No age data: default age bins 0..Nages-1 and a no-error ageing-error matrix
if (is.null(Age.comp.data))
{
data.file$N_agebins<-Nages()
data.file$agebin_vector<-0:(Nages()-1)
data.file$ageerror<-data.frame(matrix(c(rep(-1,(Nages()+1)),rep(0.001,(Nages()+1))),2,(Nages()+1),byrow=TRUE))
colnames(data.file$ageerror)<-paste0("age",0:Nages())
}
if (!is.null(Age.comp.data))
{
data.file$N_agebins<-ncol(Age.comp.data)-8
data.file$agebin_vector<-as.numeric(colnames(Age.comp.data[,9:ncol(Age.comp.data)]))
#Default ageing-error matrix (row 1 = bias -1 = none, row 2 = tiny SD),
#replaced by the uploaded matrix when ageing error is provided
data.file$ageerror<-data.frame(matrix(c(rep(-1,(Nages()+1)),rep(0.001,(Nages()+1))),2,(Nages()+1),byrow=TRUE))
if(!is.null(input$Ageing_error_choice)){
if(input$Ageing_error_choice)
{
data.file$ageerror<-data.frame((rv.AgeErr$data))
#Each ageing-error definition is 2 rows (bias and SD)
data.file$N_ageerror_definitions<-nrow(rv.AgeErr$data)/2
}
}
#Label object for r4ss
colnames(data.file$ageerror)<-paste0("age",0:Nages())
rownames(data.file$ageerror)<-c(1:nrow(data.file$ageerror))
# data.file$ageerror<-data.frame(matrix(c(rep(-1,(Nages()+1)),rep(0.001,(Nages()+1))),2,(Nages()+1),byrow=TRUE))
# colnames(data.file$ageerror)<-paste0("age",1:Nages())
#SS3 agecomp format: 9 meta columns + female age bins + male age bins
age.data.names<-c(c("Yr","Month","Fleet","Sex","Part","Ageerr","Lbin_lo","Lbin_hi","Nsamp"),paste0("f",data.file$agebin_vector),paste0("m",data.file$agebin_vector))
#1-row NA placeholders; unused sex groups removed by na.omit() below
age.data.females<-age.data.males<-age.data.unknowns<-data.frame(matrix(rep(NA,length(age.data.names)),nrow=1))
colnames(Age.comp.data)[1:8]<-c("Year","Month","Fleet","Sex","AgeErr","Lbin_low","Lbin_hi","Nsamps")
#female ages: counts in the "f" bins, zeros in the "m" bins
if(nrow(subset(Age.comp.data,Sex==1))>0){
Age.comp.data_female<-subset(Age.comp.data,Sex==1 & Nsamps>0)
samp.yrs_females<-Age.comp.data_female[,1]
age.data.females<-data.frame(cbind(samp.yrs_females,
Age.comp.data_female[,2],
Age.comp.data_female[,3],
Age.comp.data_female[,4],
rep(0,length(samp.yrs_females)),
Age.comp.data_female[,5],
Age.comp.data_female[,6],
Age.comp.data_female[,7],
Age.comp.data_female[,8],
Age.comp.data_female[,9:ncol(Age.comp.data_female)],
Age.comp.data_female[,9:ncol(Age.comp.data_female)]*0)
)
}
#male ages: zeros in the "f" bins, counts in the "m" bins
if(nrow(subset(Age.comp.data,Sex==2))>0){
Age.comp.data_male<-subset(Age.comp.data,Sex==2 & Nsamps>0)
samp.yrs_males<-Age.comp.data_male[,1]
age.data.males<-data.frame(cbind(samp.yrs_males,
Age.comp.data_male[,2],
Age.comp.data_male[,3],
Age.comp.data_male[,4],
rep(0,length(samp.yrs_males)),
Age.comp.data_male[,5],
Age.comp.data_male[,6],
Age.comp.data_male[,7],
Age.comp.data_male[,8],
Age.comp.data_male[,9:ncol(Age.comp.data_male)]*0,
Age.comp.data_male[,9:ncol(Age.comp.data_male)])
)
}
#unknown sex ages: counts in the "f" bins with Sex=0
if(nrow(subset(Age.comp.data,Sex==0))>0){
Age.comp.data_unknown<-subset(Age.comp.data,Sex==0 & Nsamps>0)
samp.yrs_unknown<-Age.comp.data_unknown[,1]
age.data.unknowns<-data.frame(cbind(samp.yrs_unknown,
Age.comp.data_unknown[,2],
Age.comp.data_unknown[,3],
Age.comp.data_unknown[,4],
rep(0,length(samp.yrs_unknown)),
Age.comp.data_unknown[,5],
Age.comp.data_unknown[,6],
Age.comp.data_unknown[,7],
Age.comp.data_unknown[,8],
Age.comp.data_unknown[,9:ncol(Age.comp.data_unknown)],
Age.comp.data_unknown[,9:ncol(Age.comp.data_unknown)]*0)
)
}
#if(nrow(subset(Age.comp.data,Sex==0))>0){age.data.unknowns<-data.frame(cbind(
# age.data.unknowns,
# Age.comp.data[1,7:ncol(Age.comp.data_unknown)],
# Age.comp.data[1,7:ncol(Age.comp.data_unknown)]*0))
# }
colnames(age.data.females)<-colnames(age.data.males)<-colnames(age.data.unknowns)<-age.data.names
data.file$agecomp<-na.omit(rbind(age.data.females,age.data.males,age.data.unknowns))
}
# inFile_age<- rv.Age$data
# if (is.null(inFile_age)){
# data.file$N_agebins<-Nages()
# data.file$agebin_vector<-1:Nages()
# data.file$ageerror<-data.frame(matrix(c(rep(-1,(Nages()+1)),rep(0.001,(Nages()+1))),2,(Nages()+1),byrow=TRUE))
# colnames(data.file$ageerror)<-paste0("age",1:Nages())
# }
# if (!is.null(inFile_age)){
# Age.comp.data<-rv.Age$data
# age.classes<-nrow(Age.comp.data)
# data.file$N_agebins<-age.classes
# data.file$agebin_vector<-Age.comp.data[,1]
# data.file$ageerror<-data.frame(matrix(c(rep(-1,(age.classes+1)),rep(0.001,(age.classes+1))),2,(age.classes+1),byrow=TRUE))
# colnames(data.file$ageerror)<-paste0("age",1:Nages())
# age.samp.yrs<-as.numeric(colnames(Age.comp.data)[-1])
# age.data.names<-c(c("Yr","Seas","FltSvy","Gender","Part","Ageerr","Lbin_lo","Lbin_hi","Nsamp"),paste0("f",Age.comp.data[,1]),paste0("m",Age.comp.data[,1]))
# if(length(age.samp.yrs)==1){
# data.file$agecomp<-data.frame(matrix(c(samp.yrs,
# rep(1,length(age.samp.yrs)),
# rep(1,length(age.samp.yrs)),
# rep(1,length(age.samp.yrs)),
# rep(0,length(age.samp.yrs)),
# rep(-1,length(age.samp.yrs)),
# rep(-1,length(age.samp.yrs)),
# rep(-1,length(age.samp.yrs)),
# colSums(Age.comp.data[-1]),
# t(Age.comp.data)[-1,],
# t(Age.comp.data)[-1,]*0),
# nrow=length(age.samp.yrs),
# ncol=9+length(Age.comp.data[,1])*2,
# byrow=FALSE))[,,drop=FALSE]
# }
# else{
# data.file$agecomp<-data.frame(matrix(cbind(samp.yrs,
# rep(1,length(age.samp.yrs)),
# rep(1,length(age.samp.yrs)),
# rep(1,length(age.samp.yrs)),
# rep(0,length(age.samp.yrs)),
# rep(-1,length(age.samp.yrs)),
# rep(-1,length(age.samp.yrs)),
# rep(-1,length(age.samp.yrs)),
# colSums(Age.comp.data[-1]),
# t(Age.comp.data)[-1,],
# t(Age.comp.data)[-1,]*0),
# nrow=length(age.samp.yrs),
# ncol=9+length(Age.comp.data[,1])*2,
# byrow=FALSE))[,,drop=FALSE]
# }
# colnames(data.file$agecomp)<-age.data.names
# }
#Create data info
#Replicate the template's single fleet-info rows for every additional fleet
if(data.file$Nfleets>1){
for(i in 1:(data.file$Nfleets-1))
{
data.file$fleetinfo<-rbind(data.file$fleetinfo,data.file$fleetinfo[1,])
data.file$CPUEinfo<-rbind(data.file$CPUEinfo,data.file$CPUEinfo[1,])
data.file$len_info<-rbind(data.file$len_info,data.file$len_info[1,])
data.file$age_info<-rbind(data.file$age_info,data.file$age_info[1,])
}
#Set Dirichlet on
# data.file$age_info[,5]<-data.file$len_info[,5]<-1
#Set up the correct fleet enumeration
# data.file$len_info[,6]<-1:data.file$Nfleets #Used for Dirichlet set-up
# data.file$age_info[,6]<-(data.file$Nfleets+1):(2*data.file$Nfleets) #Used for Dirichlet set-up
#Survey names
#Fleet names come from the catch-file column headers; survey-only fleets
#(index fleet numbers beyond the catch fleets) are named from the index file
#and flagged as type 3 (survey) in fleetinfo
if(is.null(rv.Ct$data)){data.file$fleetinfo$fleetname<-paste0("Fishery",1:data.file$Nfleets)}
if(!is.null(rv.Ct$data))
{
fishery.names<-gsub(" ","",colnames(rv.Ct$data)[-1])
if(!is.null(rv.Index$data)&data.file$Nfleets>catch.fleets)
{
Surveyonly<-subset(rv.Index$data,Fleet>catch.fleets)
fleet.survey.names<-unique(c(fishery.names,unique(Surveyonly[,6])))
survey.fleets<-unique(Surveyonly[,3])
data.file$fleetinfo$fleetname<-fleet.survey.names
}
if(is.null(rv.Index$data)|all(!is.null(rv.Index$data)&data.file$Nfleets==catch.fleets)){data.file$fleetinfo$fleetname<-fishery.names}
if(!is.null(rv.Index$data)& max(rv.Index$data[,3])>length(fishery.names)){data.file$fleetinfo[survey.fleets,1]<-3}
}
data.file$CPUEinfo[,1]<-1:data.file$Nfleets
}
#An "RSS" survey uses CPUE units code 34 — TODO confirm code meaning in SS3 manual
if(!is.null(rv.Index$data)&data.file$Nfleets>catch.fleets)
{
if(any(fleet.survey.names=="RSS"))
{
data.file$CPUEinfo[grep("RSS",fleet.survey.names),2]<-34
}
}
#Change survey timing to 1
data.file$fleetinfo$surveytiming[data.file$fleetinfo$type%in%3]<-1
#Catch units
#User-specified per-fleet catch units (comma-separated) override column 4
if(input$Ct_units_choice)
{
ct.units<-as.numeric(trimws(unlist(strsplit(input$fleet_ct_units,","))))
#data.file$fleetinfo[ct.units,4]<-2 #use this when just specifying which are fleets are numbers
data.file$fleetinfo[,4]<-ct.units
}
#Write the assembled data file for this scenario
SS_writedat(data.file,paste0("Scenarios/",input$Scenario_name,"/datafile.dat"),overwrite=TRUE)
}
####################### END DATA FILE #####################################
##################################################################################
####################### START CTL FILE ####################################
#Read, edit then write new CONTROL file
#Read, edit then write new CONTROL file
if(all(!input$use_controlnew,!input$user_model))
{
#Change to 1 platoon
if(!is.null(input$GT1)){if(input$GT1){ctl.file$N_platoon<-1}}
#LENGTH or AGE-ONLY
#Branch used when comps/index exist but no catch data (length/age-only model).
#MG_parms rows 1-12 are female biology, 13-20+ male biology (r4ss layout);
#cols 3:4 = initial value and prior.
if(all(!is.null(c(rv.Lt$data,rv.Age$data,rv.Index$data)),is.null(rv.Ct$data))==TRUE)
{
fem_vbgf<-VBGF(input$Linf_f,input$k_f,input$t0_f,c(0:Nages()))
#Females
ctl.file$MG_parms[1,3]<-input$M_f #M
#ctl.file$MG_parms[2,3:4]<-fem_vbgf[1] #L0
#Growth reference age set to t0 so L-at-A1 (L0) can be fixed at 0
ctl.file$Growth_Age_for_L1<-input$t0_f
ctl.file$MG_parms[2,3:4]<-0 #L0
ctl.file$MG_parms[3,3:4]<-input$Linf_f #Linf
ctl.file$MG_parms[4,3:4]<-input$k_f #k
ctl.file$MG_parms[5,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f,","))))[1] #CV young
ctl.file$MG_parms[6,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f,","))))[2] #CV old
#Maturity
ctl.file$MG_parms[9,3:4]<-input$L50_f #Lmat50%
#Logistic maturity slope from L50 and L95
ctl.file$MG_parms[10,3:4]<- log(0.05/0.95)/(input$L95_f-input$L50_f) #Maturity slope
#ctl.file$MG_parms[11,3:4]<-input$Fec_a_f #coefficient
#ctl.file$MG_parms[12,3:4]<- input$Fec_b_f #exponent
#Males: default to the female values; overridden below by offset or
#male-specific settings
ctl.file$MG_parms[13,3]<-input$M_f #M
#ctl.file$MG_parms[14,3:4]<-fem_vbgf[1] #L0
ctl.file$MG_parms[14,3:4]<-0 #L0
ctl.file$MG_parms[15,3:4]<-input$Linf_f #Linf
ctl.file$MG_parms[16,3:4]<-input$k_f #k
ctl.file$MG_parms[17,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f,","))))[1] #CV young
ctl.file$MG_parms[18,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f,","))))[2] #CV old
#ctl.file$MG_parms[19,3:4]<-input$WLa_f #coefficient
#ctl.file$MG_parms[20,3:4]<-input$WLb_f #exponent
#Offset approach: male parameters become 0 offsets from female values
if(input$male_offset)
{
ctl.file$parameter_offset_approach<-2 #Change to offset approach
ctl.file$MG_parms[13,3:4]<-0 #M
ctl.file$MG_parms[14,3:4]<-0 #L0
ctl.file$MG_parms[15,3:4]<-0 #Linf
ctl.file$MG_parms[16,3:4]<-0 #k
ctl.file$MG_parms[17,3:4]<-0 #CV
ctl.file$MG_parms[18,3:4]<-0 #CV
#Weight-length
ctl.file$MG_parms[19,3:4]<-0 #coefficient
ctl.file$MG_parms[20,3:4]<-0 #exponent
}
#Male-specific parameter values
if(input$male_parms)
{
male_vbgf<-VBGF(input$Linf_m,input$k_m,input$t0_m,c(input$t0_f:Nages()))
ctl.file$MG_parms[13,3]<-input$M_m #M
ctl.file$MG_parms[14,3:4]<-male_vbgf[1] #L0
ctl.file$MG_parms[15,3:4]<-input$Linf_m #Linf
ctl.file$MG_parms[16,3:4]<-input$k_m #k
ctl.file$MG_parms[17,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_m,","))))[1] #CV young
ctl.file$MG_parms[18,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_m,","))))[2] #CV old
# ctl.file$MG_parms[19,3:4]<-input$WLa_m #coefficient
# ctl.file$MG_parms[20,3:4]<-input$WLb_m #exponent
}
#lnR0 estimation phase: -1 (fixed) when estimating F, 1 (estimated) otherwise
if(input$Ct_F_LO_select=="Estimate F"){ctl.file$SR_parms[1,7]=-1} #lnR0
if(input$Ct_F_LO_select=="Constant Catch"){ctl.file$SR_parms[1,7]=1} #lnR0
ctl.file$SR_parms[2,3:4]<-input$h_LO #steepness
}
#LENGTH and CATCH with fixed parameters
#LENGTH and CATCH with fixed parameters: runs when parameter estimation is off
#and at least one of (lengths, ages, index) is paired with catch data.
#MG_parms rows 1-12 = females, 13-20 = males; columns 3:4 = INIT/PRIOR values.
if(all(any(input$est_parms==FALSE,input$est_parms2==FALSE),any(all(!is.null(rv.Lt$data),!is.null(rv.Ct$data)),all(!is.null(rv.Age$data),!is.null(rv.Ct$data)),all(!is.null(rv.Index$data),!is.null(rv.Ct$data))))==TRUE)
{
#Female lengths-at-age (first element was formerly used as L0; see commented lines)
fem_vbgf<-VBGF(input$Linf_f_fix,input$k_f_fix,input$t0_f_fix,c(0:Nages()))
#Females
ctl.file$MG_parms[1,3]<-input$M_f_fix #M
#ctl.file$MG_parms[2,3:4]<-fem_vbgf[1] #L0
ctl.file$Growth_Age_for_L1<-input$t0_f_fix
ctl.file$MG_parms[2,3:4]<-0 #L0 (zero since growth reference age is set to t0 above — TODO confirm)
ctl.file$MG_parms[3,3:4]<-input$Linf_f_fix #Linf
ctl.file$MG_parms[4,3:4]<-input$k_f_fix #k
ctl.file$MG_parms[5,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f_fix,","))))[1] #CV (young)
ctl.file$MG_parms[6,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f_fix,","))))[2] #CV (old)
#Weight-length
ctl.file$MG_parms[7,3:4]<-input$WLa_f_fix #coefficient
ctl.file$MG_parms[8,3:4]<- input$WLb_f_fix #exponent
#Maturity
ctl.file$MG_parms[9,3:4]<-input$L50_f_fix #Lmat50%
ctl.file$MG_parms[10,3:4]<- log(0.05/0.95)/(input$L95_f_fix-input$L50_f_fix) #Maturity slope (logistic, from L50/L95)
ctl.file$MG_parms[11,3:4]<-input$Fec_a_f_fix #coefficient
ctl.file$MG_parms[12,3:4]<- input$Fec_b_f_fix #exponent
#Males: default to female values; overridden below if male inputs are provided
ctl.file$MG_parms[13,3]<-input$M_f_fix #M (female M used as male default)
#ctl.file$MG_parms[14,3:4]<-fem_vbgf[1] #L0
ctl.file$MG_parms[14,3:4]<-0 #L0
ctl.file$MG_parms[15,3:4]<-input$Linf_f_fix #Linf
ctl.file$MG_parms[16,3:4]<-input$k_f_fix #k
ctl.file$MG_parms[17,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f_fix,","))))[1] #CV (young)
ctl.file$MG_parms[18,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f_fix,","))))[2] #CV (old)
ctl.file$MG_parms[19,3:4]<-input$WLa_f_fix #coefficient
ctl.file$MG_parms[20,3:4]<- input$WLb_f_fix #exponent
#Offset approach: male rows become offsets from females, so zero them
if(input$male_offset_fix)
{
ctl.file$parameter_offset_approach<-2 #Change to offset approach
ctl.file$MG_parms[13,3:4]<-0 #M
ctl.file$MG_parms[14,3:4]<-0 #L0
ctl.file$MG_parms[15,3:4]<-0 #Linf
ctl.file$MG_parms[16,3:4]<-0 #k
ctl.file$MG_parms[17,3:4]<-0 #CV
ctl.file$MG_parms[18,3:4]<-0 #CV
#Weight-length
ctl.file$MG_parms[19,3:4]<-0 #coefficient
ctl.file$MG_parms[20,3:4]<-0 #exponent
}
#Direct male parameter inputs
if(input$male_parms_fix)
{
male_vbgf<-VBGF(input$Linf_m_fix,input$k_m_fix,input$t0_m_fix,c(input$t0_f_fix:Nages()))
ctl.file$MG_parms[13,3]<-input$M_m_fix #M
ctl.file$MG_parms[14,3:4]<-male_vbgf[1] #L0
ctl.file$MG_parms[15,3:4]<-input$Linf_m_fix #Linf
ctl.file$MG_parms[16,3:4]<-input$k_m_fix #k
ctl.file$MG_parms[17,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_m_fix,","))))[1] #CV (young)
ctl.file$MG_parms[18,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_m_fix,","))))[2] #CV (old)
#Weight-length
ctl.file$MG_parms[19,3:4]<-input$WLa_m_fix #coefficient
ctl.file$MG_parms[20,3:4]<-input$WLb_m_fix #exponent
}
#S-R
ctl.file$SR_parms[1,3:4]<-input$lnR0 #lnR0
ctl.file$SR_parms[2,3:4]<-input$h #steepness
}
#LENGTH and CATCH with estimated parameters
#LENGTH and CATCH with estimated parameters: runs when estimation is requested
#and at least one of (lengths, ages, index) is paired with catch data.
#For each parameter: columns 3:4 = INIT/PRIOR (PRIOR is logged for lognormal
#priors), column 5 = prior SD, column 6 = prior type code, column 7 = phase.
if(all(any(input$est_parms==TRUE,input$est_parms2==FALSE),any(all(!is.null(rv.Lt$data),!is.null(rv.Ct$data)),all(!is.null(rv.Age$data),!is.null(rv.Ct$data)),all(!is.null(rv.Index$data),!is.null(rv.Ct$data))))==TRUE)
{
fem_vbgf<-VBGF(input$Linf_f_mean,input$k_f_mean,input$t0_f_mean,c(0:Nages()))
#c("lognormal","truncated normal","uniform","beta")
#Map UI prior names to SS prior type codes (0-3, 5, 6)
prior.name<-c("no prior","symmetric beta", "beta","lognormal","gamma","normal")
prior.type<-c(0:3,5,6)
#Females
#M
if(input$M_f_prior=="lognormal"){ctl.file$MG_parms[1,3:4]<-c(input$M_f_mean,log(input$M_f_mean))}
else {ctl.file$MG_parms[1,3:4]<-c(input$M_f_mean,input$M_f_mean)}
ctl.file$MG_parms[1,5]<-input$M_f_SD
ctl.file$MG_parms[1,6]<-prior.type[prior.name==input$M_f_prior]
ctl.file$MG_parms[1,7]<-input$M_f_phase
#L0
ctl.file$Growth_Age_for_L1<-input$t0_f_mean
# if(input$t0_f_prior=="lognormal"){ctl.file$MG_parms[2,3:4]<-c(fem_vbgf[1],log(fem_vbgf[1]))}
# else {ctl.file$MG_parms[2,3:4]<-fem_vbgf[1]}
if(input$t0_f_prior=="lognormal"){ctl.file$MG_parms[2,3:4]<-c(0,log(0.0000001))}
else {ctl.file$MG_parms[2,3:4]<-0}
ctl.file$MG_parms[2,5]<-input$t0_f_SD
ctl.file$MG_parms[2,6]<-prior.type[prior.name==input$t0_f_prior]
ctl.file$MG_parms[2,7]<-input$t0_f_phase
#Linf
if(input$Linf_f_prior=="lognormal"){ctl.file$MG_parms[3,3:4]<-c(input$Linf_f_mean,log(input$Linf_f_mean))}
else{ctl.file$MG_parms[3,3:4]<-input$Linf_f_mean}
ctl.file$MG_parms[3,5]<-input$Linf_f_SD
ctl.file$MG_parms[3,6]<-prior.type[prior.name==input$Linf_f_prior]
ctl.file$MG_parms[3,7]<-input$Linf_f_phase
#k
if(input$k_f_prior=="lognormal"){ctl.file$MG_parms[4,3:4]<-c(input$k_f_mean,log(input$k_f_mean))}
else {ctl.file$MG_parms[4,3:4]<-input$k_f_mean}
ctl.file$MG_parms[4,5]<-input$k_f_SD
ctl.file$MG_parms[4,6]<-prior.type[prior.name==input$k_f_prior]
ctl.file$MG_parms[4,7]<-input$k_f_phase
#CV young
if(input$CV_lt_f_young_prior=="lognormal"){ctl.file$MG_parms[5,3:4]<-c(input$CV_lt_f_young_mean,log(input$CV_lt_f_young_mean))}
else{ctl.file$MG_parms[5,3:4]<-input$CV_lt_f_young_mean}
ctl.file$MG_parms[5,5]<-input$CV_lt_f_young_SD
ctl.file$MG_parms[5,6]<-prior.type[prior.name==input$CV_lt_f_young_prior]
ctl.file$MG_parms[5,7]<-input$CV_lt_f_young_phase
#CV old
if(input$CV_lt_f_old_prior=="lognormal"){ctl.file$MG_parms[6,3:4]<-c(input$CV_lt_f_old_mean,log(input$CV_lt_f_old_mean))}
else{ctl.file$MG_parms[6,3:4]<-input$CV_lt_f_old_mean}
#Bug fix: removed an unconditional re-assignment of MG_parms[6,3:4] that
#clobbered the lognormal-prior value set just above.
ctl.file$MG_parms[6,5]<-input$CV_lt_f_old_SD
ctl.file$MG_parms[6,6]<-prior.type[prior.name==input$CV_lt_f_old_prior]
ctl.file$MG_parms[6,7]<-input$CV_lt_f_old_phase
#Weight-length
ctl.file$MG_parms[7,3:4]<-input$WLa_f_est #coefficient
ctl.file$MG_parms[8,3:4]<- input$WLb_f_est #exponent
#Maturity
ctl.file$MG_parms[9,3:4]<-input$L50_f_est #Lmat50%
ctl.file$MG_parms[10,3:4]<- log(0.05/0.95)/(input$L95_f_est-input$L50_f_est) #Maturity slope
ctl.file$MG_parms[11,3:4]<-input$Fec_a_f_est #coefficient
ctl.file$MG_parms[12,3:4]<- input$Fec_b_f_est #exponent
#Males: default to female values; overridden below if male inputs are provided
#NOTE(review): lognormal form used unconditionally here regardless of prior choice — TODO confirm
ctl.file$MG_parms[13,3:4]<-c(input$M_f_mean,log(input$M_f_mean)) #M
#ctl.file$MG_parms[14,3:4]<-fem_vbgf[1] #L0
ctl.file$MG_parms[14,3:4]<-0 #L0
ctl.file$MG_parms[15,3:4]<-input$Linf_f_mean #Linf
ctl.file$MG_parms[16,3:4]<-input$k_f_mean #k
ctl.file$MG_parms[17,3:4]<-input$CV_lt_f_old_mean #CV
ctl.file$MG_parms[18,3:4]<-input$CV_lt_f_old_mean #CV
#Weight-length
ctl.file$MG_parms[19,3:4]<-input$WLa_f_est #coefficient
ctl.file$MG_parms[20,3:4]<- input$WLb_f_est #exponent
if(input$male_offset_est)
{
ctl.file$parameter_offset_approach<-2 #Change to offset approach
ctl.file$MG_parms[13,3:4]<-0 #M
ctl.file$MG_parms[14,3:4]<-0 #L0
ctl.file$MG_parms[15,3:4]<-0 #Linf
ctl.file$MG_parms[16,3:4]<-0 #k
ctl.file$MG_parms[17,3:4]<-0 #CV
ctl.file$MG_parms[18,3:4]<-0 #CV
#Weight-length
ctl.file$MG_parms[19,3:4]<-0 #coefficient
ctl.file$MG_parms[20,3:4]<-0 #exponent
}
if(input$male_parms_est)
{
male_vbgf_est<-VBGF(input$Linf_m_mean,input$k_m_mean,input$t0_m_mean,c(input$t0_f_mean:Nages()))
# ctl.file$MG_parms[13,3]<-input$M_m_mean #M
# ctl.file$MG_parms[14,3:4]<-male_vbgf_est[1] #L0
# ctl.file$MG_parms[15,3:4]<-input$Linf_m_mean #Linf
# ctl.file$MG_parms[16,3:4]<-input$k_m_mean #k
# ctl.file$MG_parms[17,3:4]<-input$CV_lt_m_mean #CV
# ctl.file$MG_parms[18,3:4]<-input$CV_lt_m_mean #CV
#M
if(input$M_m_prior=="lognormal"){ctl.file$MG_parms[13,3:4]<-c(input$M_m_mean,log(input$M_m_mean))}
else {ctl.file$MG_parms[13,3:4]<-c(input$M_m_mean,input$M_m_mean)}
ctl.file$MG_parms[13,5]<-input$M_m_SD
ctl.file$MG_parms[13,6]<-prior.type[prior.name==input$M_m_prior]
ctl.file$MG_parms[13,7]<-input$M_m_phase
#L0
#if(input$t0_f_prior=="lognormal"){ctl.file$MG_parms[14,3:4]<-c(male_vbgf_est[1],log(male_vbgf_est[1]+0.000000001))}
#else {ctl.file$MG_parms[14,3:4]<-male_vbgf_est[1]}
if(input$t0_m_prior=="lognormal"){ctl.file$MG_parms[14,3:4]<-c(male_vbgf_est[1],log(male_vbgf_est[1]+0.000000001))}
else {ctl.file$MG_parms[14,3:4]<-male_vbgf_est[1]}
ctl.file$MG_parms[14,5]<-input$t0_m_SD
ctl.file$MG_parms[14,6]<-prior.type[prior.name==input$t0_m_prior]
ctl.file$MG_parms[14,7]<-input$t0_m_phase
#Linf
if(input$Linf_m_prior=="lognormal"){ctl.file$MG_parms[15,3:4]<-c(input$Linf_m_mean,log(input$Linf_m_mean))}
else{ctl.file$MG_parms[15,3:4]<-input$Linf_m_mean}
ctl.file$MG_parms[15,5]<-input$Linf_m_SD
ctl.file$MG_parms[15,6]<-prior.type[prior.name==input$Linf_m_prior]
ctl.file$MG_parms[15,7]<-input$Linf_m_phase
#k
if(input$k_m_prior=="lognormal"){ctl.file$MG_parms[16,3:4]<-c(input$k_m_mean,log(input$k_m_mean))}
else {ctl.file$MG_parms[16,3:4]<-input$k_m_mean}
ctl.file$MG_parms[16,5]<-input$k_m_SD
ctl.file$MG_parms[16,6]<-prior.type[prior.name==input$k_m_prior]
ctl.file$MG_parms[16,7]<-input$k_m_phase
#CV young
if(input$CV_lt_m_young_prior=="lognormal"){ctl.file$MG_parms[17,3:4]<-c(input$CV_lt_m_young_mean,log(input$CV_lt_m_young_mean))}
else{ctl.file$MG_parms[17,3:4]<-input$CV_lt_m_young_mean}
ctl.file$MG_parms[17,5]<-input$CV_lt_m_young_SD
ctl.file$MG_parms[17,6]<-prior.type[prior.name==input$CV_lt_m_young_prior]
ctl.file$MG_parms[17,7]<-input$CV_lt_m_young_phase
#CV old
if(input$CV_lt_m_old_prior=="lognormal"){ctl.file$MG_parms[18,3:4]<-c(input$CV_lt_m_old_mean,log(input$CV_lt_m_old_mean))}
else{ctl.file$MG_parms[18,3:4]<-input$CV_lt_m_old_mean}
ctl.file$MG_parms[18,5]<-input$CV_lt_m_old_SD
ctl.file$MG_parms[18,6]<-prior.type[prior.name==input$CV_lt_m_old_prior]
ctl.file$MG_parms[18,7]<-input$CV_lt_m_old_phase
#Weight-length
ctl.file$MG_parms[19,3:4]<-input$WLa_m_est #coefficient
ctl.file$MG_parms[20,3:4]<- input$WLb_m_est #exponent
}
#S-R
ctl.file$SR_parms[1,3:4]<-input$lnR0_est #lnR0
#Bug fix: log(h_mean_ss) referenced an undefined name; must be input$h_mean_ss
if(input$h_ss_prior=="lognormal"){ctl.file$SR_parms[2,3:4]<-c(input$h_mean_ss,log(input$h_mean_ss))}
else{ctl.file$SR_parms[2,3:4]<-input$h_mean_ss}
ctl.file$SR_parms[2,5]<-input$h_SD_ss
ctl.file$SR_parms[2,6]<-prior.type[prior.name==input$h_ss_prior]
ctl.file$SR_parms[2,7]<-input$h_phase
}
#Recruitment estimation
#Recruitment deviation settings: default to no rec-dev estimation, then enable
#and configure if the user requested it (input$rec_choice).
ctl.file$do_recdev<-0
ctl.file$recdev_phase<- -1 #negative phase = not estimated
ctl.file$MainRdevYrFirst<-input$styr #Start year of recruitment estimation
ctl.file$MainRdevYrLast<-input$endyr #Last year of recruitment estimation
ctl.file$last_early_yr_nobias_adj<-input$styr #End year of early rev devs (no bias)
ctl.file$first_yr_fullbias_adj<-input$styr #First year full bias
ctl.file$last_yr_fullbias_adj<-input$endyr #Last year full bias
ctl.file$first_recent_yr_nobias_adj<-input$endyr #First year recent no bias
if(input$rec_choice)
{
ctl.file$SR_parms[3,3:4]<-input$sigmaR #sigma R
#Map UI labels to SS do_recdev codes
if(input$RecDevChoice=="1: Devs sum to zero"){ctl.file$do_recdev<-1}
if(input$RecDevChoice=="2: Simple deviations"){ctl.file$do_recdev<-2}
if(input$RecDevChoice=="3: deviation vector"){ctl.file$do_recdev<-3}
if(input$RecDevChoice=="4: option 3 plus penalties"){ctl.file$do_recdev<-4}
ctl.file$MainRdevYrFirst<-input$Rdev_startyr #Start year of recruitment estimation
ctl.file$MainRdevYrLast<-input$Rdev_endyr #Last year of recruitment estimation
ctl.file$recdev_phase<- 1
if(input$biasC_choice)
{
#With bias correction
ctl.file$recdev_early_start<--1 #Year early rec dev phase starts
ctl.file$recdev_early_phase<-3 #Early rec dev phase
ctl.file$Fcast_recr_phase<-0 #Forecast rec dev phase
ctl.file$last_early_yr_nobias_adj<-input$NobiasC_early #End year of early rev devs (no bias)
ctl.file$first_yr_fullbias_adj<-input$BiasC_startyr #First year full bias
ctl.file$last_yr_fullbias_adj<-input$BiasC_endyr #Last year full bias
ctl.file$first_recent_yr_nobias_adj<-input$NobiasC_recent #First year recent no bias
ctl.file$max_bias_adj<-input$BiasC #Max bias adjustment
}
}
#SELECTIVITY
#Length Selectivity
#Parse comma-separated selectivity inputs (one value per fleet) from the UI
if(input$Ct_F_LO_select=="Estimate F" & is.null(rv.Ct$data)){ctl.file$size_selex_types[2]<-3} #Change to recognize discard fishery
Sel50<-as.numeric(trimws(unlist(strsplit(input$Sel50,",")))) #length at 50% selectivity per fleet
Sel50_phase<-as.numeric(trimws(unlist(strsplit(input$Sel50_phase,","))))
Selpeak<-as.numeric(trimws(unlist(strsplit(input$Selpeak,",")))) #length at peak selectivity per fleet
Selpeak_phase<-as.numeric(trimws(unlist(strsplit(input$Selpeak_phase,","))))
bin.width<-data.file$lbin_vector[2]-data.file$lbin_vector[1] #length-bin width
#Largest symmetric distance from the first fleet's peak to the bin range edges
minmaxbin<-min(Selpeak[1]-min(data.file$lbin_vector),max(data.file$lbin_vector)-Selpeak[1])
#NOTE(review): subtraction of lengths looks unintended (never used downstream); sel.inputs.lts is what is validated
sel.inputs.comps<-length(Sel50)-length(Sel50_phase)-length(Selpeak)-length(Selpeak_phase)
sel.inputs.lts<-c(length(Sel50),length(Sel50_phase),length(Selpeak),length(Selpeak_phase))
#Logistic selectivity: configure the double-normal parameters (rows 1-6 of
#size_selex_parms) so the descending limb is effectively flat (asymptotic).
if(input$Sel_choice=="Logistic")
{
#Throw warning if not enough selectivity inputs
if(!all(data.file$Nfleets==sel.inputs.lts))
{
sendSweetAlert(
session = session,
title = "Selectivity input warning",
text = "Please check to see if you have provided selectivity inputs (both parameter and phases) for all fleets in the model. This includes fishing fleets and surverys.",
type = "error")
remove_modal_spinner()
stopApp()
}
#ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width)
#ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
#Bounds symmetric around the peak
ctl.file$size_selex_parms[1,1:2]<-c(Selpeak[1]-minmaxbin,Selpeak[1]+minmaxbin)
ctl.file$size_selex_parms[1,3:4]<- Selpeak[1]
ctl.file$size_selex_parms[2,3:4]<- 15 #top width: large value => flat top
ctl.file$size_selex_parms[3,3:4]<- log(-((Sel50[1]-Selpeak[1])^2/log(0.5))) #ascending width from Sel50
ctl.file$size_selex_parms[4,3:4]<- -15 #descending width: forces asymptote
ctl.file$size_selex_parms[6,3:4]<- 15 #final selectivity: logit ~1
#phases (fixed at -1 for the parameters not estimated)
ctl.file$size_selex_parms[1,7]<- Selpeak_phase[1]
ctl.file$size_selex_parms[2,7]<- -1
ctl.file$size_selex_parms[3,7]<- Sel50_phase[1]
ctl.file$size_selex_parms[4,7]<- -1
ctl.file$size_selex_parms[6,7]<- -1
}
#Dome-shaped selectivity: double-normal with an estimated descending limb.
if(input$Sel_choice=="Dome-shaped")
{
#Parse dome-specific inputs FIRST. Bug fix: the original validated
#length(PeakDesc) etc. before these vectors were created, which errored.
PeakDesc<-as.numeric(trimws(unlist(strsplit(input$PeakDesc,","))))
PeakDesc_phase<-as.numeric(trimws(unlist(strsplit(input$PeakDesc_phase,","))))
LtPeakFinal<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal,","))))
LtPeakFinal_phase<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal_phase,","))))
FinalSel<-as.numeric(trimws(unlist(strsplit(input$FinalSel,","))))
FinalSel_phase<-as.numeric(trimws(unlist(strsplit(input$FinalSel_phase,","))))
#Throw warning if not enough selectivity inputs (one value per fleet required)
sel.inputs.comps<-length(Sel50)-length(Sel50_phase)-length(Selpeak)-length(Selpeak_phase)-length(PeakDesc)-length(PeakDesc_phase)-length(LtPeakFinal)-length(LtPeakFinal_phase)-length(FinalSel)-length(FinalSel_phase)
sel.inputs.lts<-c(length(Sel50),length(Sel50_phase),length(Selpeak),length(Selpeak_phase),length(PeakDesc),length(PeakDesc_phase),length(LtPeakFinal),length(LtPeakFinal_phase),length(FinalSel),length(FinalSel_phase))
if(!all(data.file$Nfleets==sel.inputs.lts))
{
sendSweetAlert(
session = session,
title = "Selectivity input warning",
text = "Please check to see if you have provided selectivity inputs (both parameter and phases) for all fleets in the model. This includes fishing fleets and surverys.",
type = "error")
remove_modal_spinner()
#Bug fix: was `break`, which is invalid outside a loop in R; use stopApp()
#to match the Logistic branch's error handling.
stopApp()
}
minmaxbin<-min(Selpeak[1]-min(data.file$lbin_vector),max(data.file$lbin_vector)-Selpeak[1])
#ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width)
#ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
ctl.file$size_selex_parms[1,1:2]<-c(Selpeak[1]-minmaxbin,Selpeak[1]+minmaxbin) #bounds symmetric around peak
ctl.file$size_selex_parms[1,3:4]<- Selpeak[1]
ctl.file$size_selex_parms[2,3:4]<- -log((max(data.file$lbin_vector)-Selpeak[1]-bin.width)/(PeakDesc[1]-Selpeak[1]-bin.width)) #top width
ctl.file$size_selex_parms[3,3:4]<- log(-((Sel50[1]-Selpeak[1])^2/log(0.5))) #ascending width from Sel50
ctl.file$size_selex_parms[4,3:4]<- log(LtPeakFinal[1]) #descending width
ctl.file$size_selex_parms[6,3:4]<- -log((1/(FinalSel[1]+0.000000001)-1)) #final selectivity (logit)
#phases
ctl.file$size_selex_parms[1,7]<- Selpeak_phase[1]
ctl.file$size_selex_parms[2,7]<- PeakDesc_phase[1]
ctl.file$size_selex_parms[3,7]<- Sel50_phase[1]
ctl.file$size_selex_parms[4,7]<- LtPeakFinal_phase[1]
ctl.file$size_selex_parms[6,7]<- FinalSel_phase[1]
}
# if(input$dirichlet)
# {
# dirichlet.index<-c(unique(data.file$lencomp[,3]),(unique(data.file$agecomp[,3])+3))
# ctl.file$dirichlet_parms[dirichlet.index,3:4]<-0
# ctl.file$dirichlet_parms[dirichlet.index,7]<-2
# }
#Add other fleets
#Replicate initial-F, selectivity-type, and selectivity-parameter rows for each
#additional fleet, then fill fleet-specific values and relabel rows for r4ss.
if(data.file$Nfleets>1){
for(i in 1:(data.file$Nfleets-1))
{
ctl.file$init_F<-rbind(ctl.file$init_F,ctl.file$init_F[1,])
ctl.file$size_selex_types<-rbind(ctl.file$size_selex_types,ctl.file$size_selex_types[1,])
if(input$Ct_F_LO_select=="Estimate F" & is.null(rv.Ct$data)){ctl.file$size_selex_types[,2]<-3}
ctl.file$age_selex_types<-rbind(ctl.file$age_selex_types,ctl.file$age_selex_types[1,])
ctl.file$size_selex_parms<-rbind(ctl.file$size_selex_parms,ctl.file$size_selex_parms[1:6,])
#Symmetric distance from THIS fleet's peak to the bin range edges
minmaxbin<-min(Selpeak[i+1]-min(data.file$lbin_vector),max(data.file$lbin_vector)-Selpeak[i+1])
if(input$Sel_choice=="Logistic")
{
ctl.file$size_selex_parms[6*i+3,3:4]<- log(-((Sel50[i+1]-Selpeak[i+1])^2/log(0.5)))
ctl.file$size_selex_parms[6*i+3,7]<- Sel50_phase[i+1]
#ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width)
# ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
#Bug fix: upper bound used Selpeak[1] (fleet 1's peak) instead of Selpeak[i+1]
ctl.file$size_selex_parms[6*i+1,1:2]<-c(Selpeak[i+1]-minmaxbin,Selpeak[i+1]+minmaxbin)
ctl.file$size_selex_parms[6*i+1,3:4]<- Selpeak[i+1]
ctl.file$size_selex_parms[6*i+1,7]<- Selpeak_phase[i+1]
ctl.file$size_selex_parms[6*i+2,3:4]<- 15
ctl.file$size_selex_parms[6*i+2,7]<- -1
ctl.file$size_selex_parms[6*i+4,3:4]<- -15
ctl.file$size_selex_parms[6*i+4,7]<- -1
ctl.file$size_selex_parms[6*i+6,3:4]<- 15
ctl.file$size_selex_parms[6*i+6,7]<- -1
}
if(input$Sel_choice=="Dome-shaped")
{
# ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
#Bug fix: upper bound used Selpeak[1] (fleet 1's peak) instead of Selpeak[i+1]
ctl.file$size_selex_parms[6*i+1,1:2]<-c(Selpeak[i+1]-minmaxbin,Selpeak[i+1]+minmaxbin)
ctl.file$size_selex_parms[6*i+1,3:4]<- Selpeak[i+1]
ctl.file$size_selex_parms[6*i+1,7]<- Selpeak_phase[i+1]
ctl.file$size_selex_parms[6*i+2,3:4]<- -log((max(data.file$lbin_vector)-Selpeak[i+1]-bin.width)/(PeakDesc[i+1]-Selpeak[i+1]-bin.width))
ctl.file$size_selex_parms[6*i+2,7]<- PeakDesc_phase[i+1]
ctl.file$size_selex_parms[6*i+3,3:4]<- log(-((Sel50[i+1]-Selpeak[i+1])^2/log(0.5)))
ctl.file$size_selex_parms[6*i+3,7]<- Sel50_phase[i+1]
ctl.file$size_selex_parms[6*i+4,3:4]<- log(LtPeakFinal[i+1])
ctl.file$size_selex_parms[6*i+4,7]<- LtPeakFinal_phase[i+1]
ctl.file$size_selex_parms[6*i+6,3:4]<- -log((1/(FinalSel[i+1]+0.000000001)-1))
ctl.file$size_selex_parms[6*i+6,7]<- FinalSel_phase[i+1]
}
#Dirichlet data-weighting
# ctl.file$dirichlet_parms<-rbind(ctl.file$dirichlet_parms,ctl.file$dirichlet_parms[1:2,])
}
#Re-label so r4ss can interpret these new entries
rownames(ctl.file$init_F)<-paste0("InitF_seas_1_flt_",1:data.file$Nfleets,"Fishery",1:data.file$Nfleets)
rownames(ctl.file$age_selex_types)<-rownames(ctl.file$size_selex_types)<-paste0("Fishery",1:data.file$Nfleets)
size_selex_parms_rownames<-list()
for(f_i in 1:data.file$Nfleets)
{
size_selex_parms_rownames[[f_i]]<-c(paste0("SizeSel_P_1_Fishery",f_i,"(",f_i,")"),
paste0("SizeSel_P_2_Fishery",f_i,"(",f_i,")"),
paste0("SizeSel_P_3_Fishery",f_i,"(",f_i,")"),
paste0("SizeSel_P_4_Fishery",f_i,"(",f_i,")"),
paste0("SizeSel_P_5_Fishery",f_i,"(",f_i,")"),
paste0("SizeSel_P_6_Fishery",f_i,"(",f_i,")"))
}
size_selex_parms_rownames<-unlist(size_selex_parms_rownames)
rownames(ctl.file$size_selex_parms)<-size_selex_parms_rownames
}
#Remove surveys from initial F lines and add q and xtra variance lines
#Remove surveys from initial F lines and add q and xtra variance lines
if(!is.null(rv.Index$data))
{
#Surveys do not take an initial F parameter
if(data.file$Nfleets>catch.fleets){ctl.file$init_F<-ctl.file$init_F[-survey.fleets,]}
#One Q_options row per surveyed fleet (column 3 of the index data = fleet number)
q.setup.names<-c("fleet","link","link_info","extra_se","biasadj", "float")
q.setup.lines<-data.frame(t(c(unique(rv.Index$data[,3])[1],1,0,0,0,1)))
if(input$Indexvar){q.setup.lines<-data.frame(t(c(unique(rv.Index$data[,3])[1],1,0,1,0,1)))} #enable extra SE
qnames<-c("LO","HI","INIT","PRIOR","PR_SD","PR_type","PHASE","env_var&link","dev_link","dev_minyr","dev_maxyr","dev_PH","Block","Block_Fxn")
q.lines<-data.frame(t(c(-15,15,1,0,1,0,-1,rep(0,7)))) #LnQ_base row (phase -1: analytic q)
if(input$Indexvar){q.lines<-data.frame(rbind(c(-15,15,1,0,1,0,-1,rep(0,7)),c(0,5,0,0,99,0,3,0,0,0,0,0,0,0)))} #plus Q_extraSD row
#Additional surveys beyond the first
if(length(unique(rv.Index$data[,3]))>1)
{
for(q in 2:length(unique(rv.Index$data[,3])))
{
if(!input$Indexvar)
{
q.setup.lines<-rbind(q.setup.lines,c(unique(rv.Index$data[,3])[q],1,0,0,0,1))
q.lines<-rbind(q.lines,c(-15,15,1,0,1,0,-1,rep(0,7)))
}
if(input$Indexvar)
{
q.setup.lines<-rbind(q.setup.lines,c(unique(rv.Index$data[,3])[q],1,0,1,0,1))
#if(unique(rv.Index$data[,6])[q]!="RSS"){q.setup.lines<-rbind(q.setup.lines,c(unique(rv.Index$data[,3])[q],1,0,1,0,1))}
#if(unique(rv.Index$data[,6])[q]=="RSS"){q.setup.lines<-rbind(q.setup.lines,c(unique(rv.Index$data[,3])[q],1,0,0,0,1))}
#RSS surveys get a fixed (negative phase) extra-SD parameter
if(unique(rv.Index$data[,6])[q]!="RSS"){q.lines<-rbind(q.lines,data.frame(rbind(c(-15,15,1,0,1,0,-1,rep(0,7)),c(0,5,0,0,99,0,3,0,0,0,0,0,0,0))))}
if(unique(rv.Index$data[,6])[q]=="RSS"){q.lines<-rbind(q.lines,data.frame(rbind(c(-15,15,1,0,1,0,-1,rep(0,7)),c(0,5,0,0,99,0,-3,0,0,0,0,0,0,0))))}
}
}
}
names(q.setup.lines)<-q.setup.names
rownames(q.setup.lines)<-unique(rv.Index$data[,6]) #column 6 = survey names
ctl.file$Q_options<-q.setup.lines
names(q.lines)<-qnames
if(!input$Indexvar){rownames(q.lines)<-paste0("LnQ_base_",unique(rv.Index$data[,6]),"(",unique(rv.Index$data[,3]),")")}
#rnames.temp<-c(paste0("LnQ_base_",unique(rv.Index$data[,5]),"(",unique(rv.Index$data[,2]),")"),paste0("Q_extraSD_",unique(rv.Index$data[,5]),"(",unique(rv.Index$data[,2]),")"))
#rnames.temp[1:length(rnames.temp)%%2 != 0]
if(input$Indexvar)
{
#Interleave LnQ_base and Q_extraSD labels (one pair per survey)
qnames.temp1<-paste0("LnQ_base_",unique(rv.Index$data[,6]),"(",unique(rv.Index$data[,3]),")")
qnames.temp2<-paste0("Q_extraSD_",unique(rv.Index$data[,6]),"(",unique(rv.Index$data[,3]),")")
qnames.temp<-as.vector(rbind(qnames.temp1,qnames.temp2))
# if(length(rnames.temp1)>1)
# {
# for(xx in 2:length(rnames.temp1))
# {
# rnames.temp<-c(rnames.temp1[x],rnames.temp2[x])
# }
# }
rownames(q.lines)<-qnames.temp
}
ctl.file$Q_parms<-q.lines
#RSS ("research survey"? — TODO confirm) fleets carry no size selectivity
if(data.file$Nfleets>catch.fleets)
{
if(any(fleet.survey.names=="RSS"))
{
RSS.index<-grep("RSS",fleet.survey.names)
#ctl.file$Q_parms<-ctl.file$Q_parms
ctl.file$size_selex_types[RSS.index,1]<-0 #Rename RSS selectivity types
ctl.file$size_selex_parms<-ctl.file$size_selex_parms[-c((RSS.index*6-5):(RSS.index*6)),] #Remove selectivity related to RSS
}
}
}
# if(input$Data_wt=="Dirichlet")
# {
# Dirichlet.fleets<-c(unique(data.file$lencomp[,3]),(unique(data.file$agecomp[,3])+data.file$Nfleets))
# # if(Dirichlet.fleets>1)
# # {
# # for(i in 1:length(Dirichlet.fleets)){ctl.file$dirichlet_parms<-rbind(ctl.file$dirichlet_parms,ctl.file$dirichlet_parms[1,])}
# # }
# ctl.file$dirichlet_parms[Dirichlet.fleets,3:4]<-0.5
# ctl.file$dirichlet_parms[Dirichlet.fleets,7]<-2
# }
#Change data weights
# Lt_dat_wts<-as.numeric(trimws(unlist(strsplit(input$Lt_datawts,","))))
# ctl.file$Variance_adjustments[1,]<-Lt_dat_wts
#Change likelihood component weight of catch
#No catch data: build lambda (likelihood weight) rows per fleet for length
#comps, catch, and initial equilibrium catch; catch lambdas are zeroed.
if (is.null(rv.Ct$data))
{
lts.lambdas<-ctl.file$lambdas[1,]
ct.lambdas<-ctl.file$lambdas[2,]
init.ct.lambdas<-ctl.file$lambdas[3,]
#Replicate the template row for each additional fleet (column 2 = fleet number)
if(data.file$Nfleets>1)
{
for(i_lam in 2:data.file$Nfleets)
{
lts.lambdas_temp<-ctl.file$lambdas[1,]
ct.lambdas_temp<-ct.lambdas[1,]
init.ct.lambdas_temp<-init.ct.lambdas[1,]
lts.lambdas_temp[1,2]<-ct.lambdas_temp[1,2]<-init.ct.lambdas_temp[1,2]<-i_lam
lts.lambdas<-rbind(lts.lambdas,lts.lambdas_temp)
ct.lambdas<-rbind(ct.lambdas,ct.lambdas_temp)
init.ct.lambdas<-rbind(init.ct.lambdas,init.ct.lambdas_temp)
}
}
#When estimating F, weight fleet length comps by relative (normalized) catch weights
if(input$Ct_F_LO_select=="Estimate F")
{
if(data.file$Nfleets>1)
{
lt.lam.in<-as.numeric(trimws(unlist(strsplit(input$Wt_fleet_Ct,","))))/sum(as.numeric(trimws(unlist(strsplit(input$Wt_fleet_Ct,",")))))
lt.lam<-lt.lam.in/max(lt.lam.in) #scale so the largest weight is 1
lts.lambdas[,4]<-lt.lam
}
if(data.file$Nfleets==1)
{
lts.lambdas[,4]<-1
}
}
rownames(lts.lambdas)<-paste0("length_Fishery",c(1:data.file$Nfleets),"_sizefreq_method_1_Phz1")
ct.lambdas[,4]<-0 #no catch data, so catch likelihood is turned off
rownames(ct.lambdas)<-paste0("catch_Fishery",c(1:data.file$Nfleets),"_Phz1")
init.ct.lambdas[,4]<-0
rownames(init.ct.lambdas)<-paste0("init_equ_catch_Fishery",c(1:data.file$Nfleets),"_lambda_for_init_equ_catch_can_only_enable/disable for_all_fleets_Phz1")
ctl.file$lambdas<-rbind(lts.lambdas,ct.lambdas,init.ct.lambdas)
ctl.file$N_lambdas<-nrow(ctl.file$lambdas)
# ctl.file$lambdas[1,4]<-0
}
#Catch data present: one catch lambda (weight 1) and one init-equilibrium-catch
#lambda (weight 0) per fleet; initial F is effectively fixed at ~0.
if(!is.null(rv.Ct$data))
{
ct.lambdas<-ctl.file$lambdas[2,]
init.ct.lambdas<-ctl.file$lambdas[3,]
#Replicate the template row for each additional fleet (column 2 = fleet number)
if(data.file$Nfleets>1)
{
for(i_lam in 2:data.file$Nfleets)
{
ct.lambdas_temp<-ct.lambdas[1,]
init.ct.lambdas_temp<-init.ct.lambdas[1,]
ct.lambdas_temp[1,2]<-init.ct.lambdas_temp[1,2]<-i_lam
ct.lambdas<-rbind(ct.lambdas,ct.lambdas_temp)
init.ct.lambdas<-rbind(init.ct.lambdas,init.ct.lambdas_temp)
}
}
ct.lambdas[,4]<-1
rownames(ct.lambdas)<-paste0("catch_Fishery",c(1:data.file$Nfleets),"_Phz1")
init.ct.lambdas[,4]<-0
#Bug fix: set rownames BEFORE the rbind so the labels are carried into
#ctl.file$lambdas (the original labeled init.ct.lambdas after combining).
rownames(init.ct.lambdas)<-paste0("init_equ_catch_Fishery",c(1:data.file$Nfleets),"_lambda_for_init_equ_catch_can_only_enable/disable for_all_fleets_Phz1")
ctl.file$lambdas<-rbind(ct.lambdas,init.ct.lambdas)
ctl.file$N_lambdas<-data.file$Nfleets*2
#ctl.file$lambdas[1,4]<-1
# ctl.file$lambdas[2,4]<-0
ctl.file$init_F[,3]<-0.00000000000000000001 #initial F ~0
ctl.file$init_F[,7]<--1 #negative phase: not estimated
}
#Write the completed control file into this scenario's folder
SS_writectl(ctl.file,paste0("Scenarios/",input$Scenario_name,"/controlfile.ctl"),overwrite=TRUE)
}
}
}
}
####################### END CTL FILE ####################################
if(exists("checkmod")|input$user_model)
{
#Read the scenario's starter file, then set whether SS initializes from ss.par.
starter.file<-SS_readstarter(paste0("Scenarios/",input$Scenario_name,"/starter.ss"))
#Use par file. Bug fix: input$use_par can be NULL (control absent from UI),
#and both if(NULL) and !NULL|is.null(NULL) error/misbehave; isTRUE() is NULL-safe.
if(isTRUE(input$use_par))
{
starter.file$init_values_src<-1
}
if(!isTRUE(input$use_par))
{
starter.file$init_values_src<-0
}
#Use datanew file. Bug fix: input$use_datanew can be NULL (control absent from
#UI); if(NULL) errors, so use NULL-safe isTRUE().
if(isTRUE(input$use_datanew))
{
starter.file$datfile<-"data_echo.ss_new"
}
if(!isTRUE(input$use_datanew))
{
#Default data file unless the user supplied their own model with use_datanew set
if(!isTRUE(input$user_model)||is.null(input$use_datanew)){starter.file$datfile<-"datafile.dat"}
}
#Use controlnew file. Bug fix: input$use_controlnew can be NULL (control absent
#from UI); if(NULL) errors, so use NULL-safe isTRUE().
if(isTRUE(input$use_controlnew))
{
starter.file$ctlfile<-"control.ss_new"
}
if(!isTRUE(input$use_controlnew))
{
#Default control file unless the user supplied their own model with use_controlnew set
if(!isTRUE(input$user_model)||is.null(input$use_controlnew)){starter.file$ctlfile<-"controlfile.ctl"}
}
#Phase 0: stop estimation at phase 0 when requested, otherwise run to phase 6.
#Bug fix: the original's second condition tested input$use_par (copy-paste from
#the par-file section), which overwrote last_estimation_phase back to 6 even
#when phase 0 was requested; it must mirror use_phase0. Also NULL-safe via isTRUE().
if(isTRUE(input$use_phase0))
{
starter.file$last_estimation_phase<-0
}
if(!isTRUE(input$use_phase0))
{
starter.file$last_estimation_phase<-6
}
#Jitter selection: jitter is handled post-run (see jitter section below), so
#the starter file itself always gets a zero jitter fraction here.
starter.file$jitter_fraction<-0
# if(input$jitter_choice)
# {
# starter.file$jitter_fraction<-input$jitter_fraction
# starter.file$init_values_src<-0
# }
SS_writestarter(starter.file,paste0("Scenarios/",input$Scenario_name),overwrite=TRUE)
#Forecast file modifications
#Reference points and forecast settings; either edit the scenario's forecast.ss
#or (if requested) promote forecast.ss_new to be the forecast file.
if(!input$use_forecastnew)
{
forecast.file<-SS_readforecast(paste0("Scenarios/",input$Scenario_name,"/forecast.ss"))
if(input$RP_choices){
forecast.file$SPRtarget<-input$SPR_target
forecast.file$Btarget<-input$B_target
#Map UI control-rule labels to SS ControlRuleMethod codes 1-4
CR_choices<-c("1: Catch fxn of SSB, buffer on F",
"2: F fxn of SSB, buffer on F",
"3: Catch fxn of SSB, buffer on catch",
"4: F fxn of SSB, buffer on catch")
CR_choices_num.vec<-c(1:4)
forecast.file$ControlRuleMethod<-CR_choices_num.vec[CR_choices==input$CR_Ct_F]
forecast.file$SBforconstantF<-input$slope_hi
forecast.file$BfornoF<-input$slope_low
}
if(input$Forecast_choice)
{
forecast.file$Nforecastyrs<-input$forecast_num
#One buffer value applies to all years; a vector gives year-specific fractions
buffer.in<-as.numeric(trimws(unlist(strsplit(input$forecast_buffer,","))))
if(length(buffer.in)==1){forecast.file$Flimitfraction<-buffer.in}
if(length(buffer.in)>1)
{
forecast.file$Flimitfraction<--1 #-1 signals year-specific fractions follow
buffer.datafr<-data.frame(Year=c((data.file$endyr+1):(data.file$endyr+input$forecast_num)),Fraction=buffer.in)
#rownames(buffer.datafr)<-paste0("#_Flimitfraction_m",1:input$forecast_num)
forecast.file$Flimitfraction_m<-buffer.datafr
}
}
SS_writeforecast(forecast.file,paste0("Scenarios/",input$Scenario_name),overwrite=TRUE)
}
if(input$use_forecastnew)
{
#Round-trip forecast.ss_new through r4ss so it becomes the active forecast file
forecast.file<-SS_readforecast(paste0("Scenarios/",input$Scenario_name,"/forecast.ss_new"))
SS_writeforecast(forecast.file,paste0("Scenarios/",input$Scenario_name),overwrite=TRUE)
}
########
#Run Stock Synthesis and plot output
show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[2],text="Model run in progress")
#Map UI data-weighting labels to r4ss tune_comps() option codes
if(input$Data_wt=="None"){DataWT_opt<-"none"}
if(input$Data_wt=="Dirichlet"){DataWT_opt<-"DM"}
if(input$Data_wt=="Francis"){DataWT_opt<-"Francis"}
if(input$Data_wt=="McAllister-Ianelli"){DataWT_opt<-"MI"}
#No explicit Hessian choice: run SS with the Hessian, and fall back to a
#-nohess run if the model produced no data_echo.ss_new (i.e. the run failed).
if(is.null(input$no_hess)){
cmd.in<-""
if(input$add_comms==TRUE){cmd.in=paste0(" ",input$add_comms_in)}
RUN.SS(paste0("Scenarios/",input$Scenario_name),ss.cmd=cmd.in,OS.in=input$OS_choice)
#Bug fix: the path was missing "/" before "data_echo.ss_new", so this check
#always failed and the -nohess retry ran even after successful runs.
if(!file.exists(paste0("Scenarios/",input$Scenario_name,"/data_echo.ss_new")))
{
cmd.in<-" -nohess"
if(input$add_comms==TRUE){cmd.in=paste0(" -nohess ",input$add_comms_in)}
RUN.SS(paste0("Scenarios/",input$Scenario_name),ss.cmd=cmd.in,OS.in=input$OS_choice)
}
}
#Explicit Hessian choice from the UI: run with or without -nohess accordingly.
if(!is.null(input$no_hess))
{
if(input$no_hess)
{
cmd.in<-" -nohess"
if(input$add_comms==TRUE){cmd.in=paste0(" -nohess ",input$add_comms_in)}
RUN.SS(paste0("Scenarios/",input$Scenario_name),ss.cmd=cmd.in,OS.in=input$OS_choice)
}
if(!input$no_hess)
{
cmd.in<-""
if(input$add_comms==TRUE){cmd.in=paste0(" ",input$add_comms_in)}
RUN.SS(paste0("Scenarios/",input$Scenario_name),ss.cmd=cmd.in,OS.in=input$OS_choice)
}
}
if(file.exists(paste0("Scenarios/",input$Scenario_name,"/data_echo.ss_new")))
{
#Read model output; retry without the covariance matrix if the first read fails
#(e.g. no invertible Hessian).
Model.output<-try(SS_output(paste0("Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE))
if(class(Model.output)=="try-error")
{
Model.output<-SS_output(paste0("Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE,covar=FALSE)
}
#Iterative data weighting (3 tuning iterations), then re-read the tuned model;
#skip the Hessian in the re-runs when covariance was unavailable.
if(input$Data_wt!="None")
{
if(Model.output$inputs$covar==TRUE)
{
tune_comps(Model.output,dir=paste0("Scenarios/",input$Scenario_name),niters_tuning=3,option=DataWT_opt,show_in_console = TRUE,verbose=FALSE)
Model.output<-try(SS_output(paste0("Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE))
}
if(Model.output$inputs$covar==FALSE)
{
tune_comps(Model.output,dir=paste0("Scenarios/",input$Scenario_name),option=DataWT_opt,niters_tuning=3,extras = " -nohess",show_in_console = TRUE,verbose=FALSE)
Model.output<-SS_output(paste0("Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE,covar=FALSE)
}
}
#Re-read the as-run data file, then produce r4ss plots/tables unless the user
#opted out. NULL inputs mean the opt-out controls are absent from the UI, in
#which case plots/tables default to ON.
data.file<-SS_readdat(paste0("Scenarios/",input$Scenario_name,"/data_echo.ss_new"))
#No plots or figures
if(is.null(input$no_plots_tables))
{
show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[4],text="Making plots")
SS_plots(Model.output,maxyr=data.file$endyr+1,verbose=FALSE)
}
if(is.null(input$no_tables))
{
show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[5],text="Making tables")
try(SSexecutivesummary(Model.output))
}
if(!is.null(input$no_plots_tables)){
if(input$no_plots_tables==FALSE)
{
#Make SS plots
show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[4],text="Making plots")
SS_plots(Model.output,maxyr=data.file$endyr+1,verbose=FALSE)
}
}
if(!is.null(input$no_tables)){
if(input$no_tables==FALSE)
{
#Make SS tables
show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[5],text="Making tables")
try(SSexecutivesummary(Model.output))
}
}
#Run multiple jitters
if(input$jitter_choice)
{
if(input$Njitter>0)
{
show_modal_spinner(spin="flower",color=wes_palettes$Moonrise1[1],text="Run jitters")
#file.copy(paste0("Scenarios/",input$Scenario_name,"/ss.exe"),paste0("Scenarios/",input$Scenario_name,"/ss_copy.exe"),overwrite = FALSE)
jits<-jitter(
dir=paste0(getwd(),"/Scenarios/",input$Scenario_name),
Njitter=input$Njitter,
printlikes = TRUE,
jitter_fraction=input$jitter_fraction,
init_values_src=0,
verbose=FALSE,
extras = "-nohess"
)
profilemodels <- SSgetoutput(dirvec=paste0("Scenarios/",input$Scenario_name), keyvec=0:input$Njitter, getcovar=FALSE)
profilesummary <- SSsummarize(profilemodels)
minlikes<-profilesummary$likelihoods[1,-length(profilesummary$likelihoods)]==min(profilesummary$likelihoods[1,-length(profilesummary$likelihoods)])
#Find best fit model
index.minlikes<-c(1:length(minlikes))[minlikes]
jitter.likes<-profilesummary$likelihoods[1,-length(profilesummary$likelihoods)]
ref.like<-min(jitter.likes,na.rm = TRUE)
#Make plot and save to folder
main.dir<-getwd()
if(!file.exists(paste0("Scenarios/",input$Scenario_name,"/Jitter Results")))
{
dir.create(paste0("Scenarios/",input$Scenario_name,"/Jitter Results"))
}
setwd(paste0("Scenarios/",input$Scenario_name,"/Jitter Results"))
png("jitterplot.png")
jitterplot<-plot(c(1:length(jitter.likes)),jitter.likes,type="p",col="black",bg="blue",pch=21,xlab="Jitter run",ylab="-log likelihood value",cex=1.25)
points(c(1:length(jitter.likes))[jitter.likes>ref.like],jitter.likes[jitter.likes>ref.like],type="p",col="black",bg="red",pch=21,cex=1.25)
abline(h=ref.like)
# likebc<-round((length(jitter.likes[ref.like==jitter.likes])/(input$Njitter+1))*100,0)
# likelessbc<-round((length(jitter.likes[ref.like>jitter.likes])/(input$Njitter+1))*100,0)
# like10<-round((length(jitter.likes[(ref.like+10)<jitter.likes])/(input$Njitter+1))*100,0)
# like2<-round(((length(jitter.likes[(ref.like+2)>jitter.likes])-(length(jitter.likes[ref.like==jitter.likes])))/(input$Njitter+1))*100,0)
# like_2_10<-round(100-(likebc+like10+like2),0)
# legend("topright",c(paste(" ",likelessbc,"% < BC",sep=""),paste(likebc,"% = BC",sep=""),paste(like2,"% < BC+2",sep=""),paste(like_2_10,"% > BC+2 & < BC+10",sep=""),paste(like10,"% > BC+10",sep="")),bty="n")
dev.off()
save(profilesummary,file=paste0("jitter_summary.DMP"))
SSplotComparisons(profilesummary, legendlabels = c(0:input$Njitter), ylimAdj = 1.30, subplot = c(1), new = FALSE,print=TRUE,plotdir=getwd())
SSplotComparisons(profilesummary, legendlabels = c(0:input$Njitter), ylimAdj = 1.30, subplot = c(3), new = FALSE,print=TRUE,plotdir=getwd())
# On-screen version of the jitter likelihood figure saved above: blue
# points are all runs, red points are runs above the best likelihood, the
# horizontal line marks the minimum. Uses jitter.likes and ref.like from
# the enclosing scope.
output$Jitterplot<-renderPlot({
# if(input$Njitter==1){return(NULL)}
# if(input$Njitter>1)
# {
#jitter.likes<-profilesummary$likelihoods[1,-length(profilesummary$likelihoods)]
#ref.like<-min(jitter.likes)
jitterplot<-plot(c(1:length(jitter.likes)),jitter.likes,type="p",col="black",bg="blue",pch=21,xlab="Jitter run",ylab="-log likelihood value",cex=1.25)
points(c(1:length(jitter.likes))[jitter.likes>ref.like],jitter.likes[jitter.likes>ref.like],type="p",col="black",bg="red",pch=21,cex=1.25)
abline(h=ref.like)
# likebc<-round((length(jitter.likes[ref.like==jitter.likes])/(input$Njitter+1))*100,0)
# likelessbc<-round((length(jitter.likes[ref.like>jitter.likes])/(input$Njitter+1))*100,0)
# like10<-round((length(jitter.likes[(ref.like+10)<jitter.likes])/(input$Njitter+1))*100,0)
# like2<-round(((length(jitter.likes[(ref.like+2)>jitter.likes])-(length(jitter.likes[ref.like==jitter.likes])))/(input$Njitter+1))*100,0)
# like_2_10<-round(100-(likebc+like10+like2),0)
# legend("topright",c(paste(" ",likelessbc,"% < BC",sep=""),paste(likebc,"% = BC",sep=""),paste(like2,"% < BC+2",sep=""),paste(like_2_10,"% > BC+2 & < BC+10",sep=""),paste(like10,"% > BC+10",sep="")),bty="n")
# }
})
#Spawning output comp
# On-screen comparison of spawning output (subplot 1) across jitter runs;
# legend labels the runs 0..Njitter (0 = original model).
output$Jittercompplot1<-renderPlot({
SSplotComparisons(profilesummary, legendlabels = c(0:input$Njitter), ylimAdj = 1.30, subplot = c(1), new = FALSE)
})
#Relative stock status comp
# On-screen comparison of relative stock status (subplot 3) across runs.
output$Jittercompplot2<-renderPlot({
SSplotComparisons(profilesummary, legendlabels = c(0:input$Njitter), ylimAdj = 1.30, subplot = c(3), new = FALSE)
})
#R-run to get new best fit model
# Re-run the model starting from the parameter file of the best jitter run
# so the final reported model is the best fit found across all jitters.
show_modal_spinner(spin="flower",color=wes_palettes$Moonrise1[2],text="Re-run best model post-jitters")
# index.minlikes is 1-based while the saved jitter par files appear to be
# numbered from 0 (legend labels run 0:Njitter), hence the -1.
file.copy(paste0(main.dir,"/Scenarios/",input$Scenario_name,"/ss.par_",(index.minlikes[1]-1),".sso"),paste0(main.dir,"/Scenarios/",input$Scenario_name,"/ss.par"),overwrite = TRUE)
#file.rename(paste0("Scenarios/",input$Scenario_name,"/ss_copy.exe"),paste0("Scenarios/",input$Scenario_name,"/ss.exe"),overwrite = FALSE)
# Start the final run from ss.par values and disable jittering.
starter.file$init_values_src<-1
starter.file$jitter_fraction<-0
SS_writestarter(starter.file,paste0(main.dir,"/Scenarios/",input$Scenario_name),overwrite=TRUE)
RUN.SS(paste0(main.dir,"/Scenarios/",input$Scenario_name),ss.cmd="",OS.in=input$OS_choice)
Model.output<-try(SS_output(paste0(main.dir,"/Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE))
# inherits() replaces class(x)=="try-error": objects can carry multiple
# classes, and comparing a class vector with == is fragile (length > 1
# conditions error in modern R).
if(inherits(Model.output,"try-error"))
{
# Fall back to reading the report without the covariance file (e.g. when
# the Hessian did not invert).
Model.output<-SS_output(paste0(main.dir,"/Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE,covar=FALSE)
}
show_modal_spinner(spin="flower",color=wes_palettes$Moonrise1[3],text="Making plots")
SS_plots(Model.output,maxyr=data.file$endyr+1,verbose=FALSE)
show_modal_spinner(spin="flower",color=wes_palettes$Moonrise1[4],text="Making tables")
# try(): the executive summary is best-effort and must not abort the run.
try(SSexecutivesummary(Model.output))
}
setwd(main.dir)
}
#Add retro runs
# if(input$Retro_choice){
# mydir<-paste0(getwd(),"/Scenarios/")
# model_settings = get_settings(settings = list(base_name = input$Scenario_name,
# run = "retro",
# retro_yrs = input$first_retro_year:input$final_retro_year))
# # tryCatch({
# run_diagnostics(mydir = mydir, model_settings = model_settings)
# # },
# # warning = function(warn){
# # showNotification(paste0(warn), type = 'warning')
# # },
# # error = function(err){
# # showNotification(paste0(err), type = 'err')
# # })
# }
#Convergence diagnostics
#Convergence diagnostics
# Maximum gradient component from the model report.
output$converge.grad <- renderText({
max.grad<-paste0("Maximum gradient: ",Model.output$maximum_gradient_component)
})
# Whether the covariance file was produced (Hessian inverted successfully).
output$converge.covar <- renderText({
covar<-paste0("Was covariance file created? ",Model.output$inputs$covar)
})
# Plain-language convergence verdict combining the two checks above.
output$converge.dec <- renderText({
# && (scalar, short-circuiting) replaces &: if() needs a length-one
# condition and both operands here are scalars.
if(Model.output$maximum_gradient_component<0.1 && Model.output$inputs$covar==TRUE)
{converge.dec<-"Model appears converged. Please check outputs for nonsense."}
else{converge.dec<-"Model may not have converged or inputs are missing. Please use the Jitter option or check/change starting values before re-running model."}
})
#Relative biomass
# Table of stock-status derived quantities: relative spawning output in the
# final model year, MSY-relative level, 1-SPR ratio, and next year's OFL
# and ABC (forecast catch), each with value and SD from derived_quants.
output$SSout_relSB_table <- renderTable({
# Row positions of the quantities of interest; which() silently drops any
# label absent from the report.
SB_indices<-c(which(rownames(Model.output$derived_quants)==paste0("Bratio_",input$endyr)),
which(rownames(Model.output$derived_quants)=="B_MSY/SSB_unfished"),
which(rownames(Model.output$derived_quants)==paste0("SPRratio_",input$endyr)),
which(rownames(Model.output$derived_quants)==paste0("OFLCatch_",(input$endyr+1))),
which(rownames(Model.output$derived_quants)==paste0("ForeCatch_",(input$endyr+1)))
)
Output_relSB_table<-data.frame(Model.output$derived_quants[SB_indices,1:3])
# Label=c(paste0("SO",input$endyr+1,"/SO_0"),
# "SO_MSY/SO_0",
# paste0("SPR",input$endyr+1),
# paste0("OFL",(input$endyr+1)),
# paste0("ABC",(input$endyr+1))
# ))
# Replace the raw report labels with display labels.
# NOTE(review): this assigns exactly 5 labels and therefore assumes all 5
# rows were found above; if any which() came back empty the lengths will
# mismatch -- confirm all labels exist for the configured model.
Output_relSB_table[,1]<-c(paste0("SO",input$endyr,"/SO_0"),
"SO_MSY/SO_0",
paste0("1-SPR",input$endyr),
paste0("OFL",(input$endyr+1)),
paste0("ABC",(input$endyr+1))
)
Output_relSB_table
# rownames=c(expression(SO[input$endyr]/SO[0]),
# expression(SO[MSY]/SO[0]),
# expression(SPR[input$endyr]),
# expression(OFL[input$endyr]),
# expression(ABC[input$endyr])
# ))
# Output_relSB_table[,1]<-c(expression('B',[input$endyr],'/B',[0]),
# expression('B'[MSY]/'B'[0]),
# expression('SPR'[input$endyr]),
# expression('OFL'[input$endyr]),
# expression('ABC'[input$endyr])
# )
})
#F estimate and relative to FMSY and proxies
# Table of fishing-mortality quantities: F in the final year plus the
# annual F references for the biomass target, SPR target, and MSY.
output$SSout_F_table <- renderTable({
  dq <- Model.output$derived_quants
  f_labels <- c(paste0("F_", input$endyr), "annF_Btgt", "annF_SPR", "annF_MSY")
  # Per-label which() keeps the original behaviour: labels missing from
  # derived_quants are silently dropped rather than producing NA rows.
  f_rows <- unlist(lapply(f_labels, function(lbl) which(rownames(dq) == lbl)))
  F_values <- dq[f_rows, 1:3]
})
#Time series output
# Selected columns of the SPR series report (year, biomass, catch, and
# related time-series quantities by column position).
output$SSout_table <- renderTable({
  spr_cols <- c(1, 5, 6, 7, 8, 9, 11, 12, 13, 25, 37)
  Model.output$sprseries[, spr_cols]
})
#Paramters
# Estimated (non-deviation) parameters with their labels as a first column.
output$Parameters_table <- renderTable({
  est_parms <- Model.output$estimated_non_dev_parameters
  cbind(rownames(est_parms), est_parms)
})
}
# Warn the user when SS did not produce data_echo.ss_new, which indicates
# the model failed to run or the Hessian did not invert.
if (!file.exists(paste0("Scenarios/", input$Scenario_name, "/data_echo.ss_new"))) {
  sendSweetAlert(
    session = session,
    title = "Model Warning",
    text = "Model did not run or Hessian did not invert. Double check data files for errors and each input for missing values (or for 0 SD for lognormal priors) and/or re-run model using a different model specification (e.g., starting values).",
    type = "warning")
}
remove_modal_spinner()
# Switch the UI to tab 2 after the run section completes.
# NOTE(review): observeEvent on exists("Model.output") reacts to the value
# of that expression when the observer is created; it is not a reactive
# dependency, so this effectively fires once at creation -- confirm the
# intended trigger.
observeEvent(exists("Model.output"), {
updateTabsetPanel(session, "tabs",
selected = '2')
})
}
})
###############################################################
### Likelihood profiles, Sensitivities, and Ensemble models ###
###############################################################
# Available filesystem volumes for the shinyFiles directory choosers.
roots <- getVolumes()()
#
# Reactive returning the user-selected directory for copying model output.
# NOTE(review): registering shinyDirChoose() inside the reactive (rather
# than once at server start) re-registers the handler on each
# invalidation; confirm this is intended.
pathModelout <- reactive({
shinyDirChoose(input, "Modelout_dir", roots= roots,session=session, filetypes=c('', 'txt'))
return(parseDirPath(roots, input$Modelout_dir))
})
# Copy the completed scenario folder (plus the retrospective folder when a
# retrospective was requested) into the user-selected output directory.
# NOTE(review): observeEvent fires whenever the trigger expression's value
# changes, not only when it becomes TRUE -- confirm the condition is the
# intended trigger.
observeEvent(as.numeric(input$tabs)==2,{
#observeEvent(exists("Model.output"),{
pathModelout.dir <-pathModelout()
# parseDirPath() returns character(0) until a directory has been chosen;
# skip the copy in that case.
if(!identical(pathModelout.dir, character(0)))
{
#dir.create(paste0(pathModelout.dir,"/Scenarios"))
file.copy(paste0("Scenarios/",input$Scenario_name), pathModelout.dir,recursive=TRUE,overwrite=TRUE)
if(input$Retro_choice){file.copy(paste0("Scenarios/",input$Scenario_name,"_retro"), pathModelout.dir,recursive=TRUE,overwrite=TRUE)}
}
})
########################
### Model efficiency ###
########################
# Directory chooser for the model-efficiency (MCMC) tab.
shinyDirChoose(input,"ModEff_dir", roots=roots,session=session, filetypes=c('', 'txt'))
# Reactive returning the chosen model directory for the MCMC run.
# Renamed from pathRetro: this reactive parses ModEff_dir, and the name
# pathRetro is re-defined later in the file for the retrospectives tab,
# which silently shadowed this definition. The commented-out code nearby
# refers to pathModEff(), which matches the intent here.
pathModEff <- reactive({
return(parseDirPath(roots, input$ModEff_dir))
})
# if(exists("ModEff_dir")){print(ModEff_dir)}
# observeEvent(as.numeric(input$tabs)==12,{
# output$ModEff_model_pick<-renderUI({
# pickerInput(
# inputId = "myModEff",
# label = "Choose model to evaluate",
# choices = list.files(pathModEff()),
# options = list(
# `actions-box` = TRUE,
# size = 12,
# `selected-text-format` = "count > 3"
# ),
# multiple = TRUE
# )
# })
# })
# Run an MCMC diagnostic (adnuts) on a chosen SS model: copy the model to
# a working folder, optionally run an initial optimization, sample with
# either random-walk Metropolis or NUTS, then render a summary and pairs
# plots for the slowest/fastest mixing parameters.
observeEvent(req(input$run_adnuts),{
modeff.mod.dir<-parseDirPath(roots, input$ModEff_dir) #pathModEff()
modeff.dir<-dirname(modeff.mod.dir)
# Working-folder name encodes the algorithm choice (e.g. model_Nuts).
modeff.name<-paste0(basename(modeff.mod.dir),"_",input$ModEff_choice)
# Copy the model files into the working folder on first use only.
if(dir.exists(file.path(modeff.dir,modeff.name))==FALSE)
{
dir.create(file.path(modeff.dir,modeff.name))
file.copy(list.files(modeff.mod.dir,full.names=TRUE),to=file.path(modeff.dir,modeff.name),recursive=TRUE,overwrite=TRUE)
}
#optimize model
# Optional short optimization run to prepare the Hessian for MCMC
# (-hbf writes the admodel.hes in the format adnuts expects).
if(input$Opt_mod==TRUE)
{
show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[1],text=paste0("Run initial optimization?"))
RUN.SS(file.path(modeff.dir,modeff.name),ss.cmd="/ss -nox -mcmc 100 -hbf",OS.in=input$OS_choice)
remove_modal_spinner()
}
#Set mcmc model
show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[1],text=paste0("Run ",input$ModEff_choice," model"))
# NOTE(review): chains is detected here but only used by the RWM branch;
# the NUTS branch hard-codes chains=4, cores=4 -- confirm intended.
chains <- parallel::detectCores()-1
m<-"ss"
p<-file.path(modeff.dir,modeff.name)
#Run MCMC model with either rwm or nuts
if(input$ModEff_choice=="RWM")
{
fit_model<- sample_rwm(model=m, path=p, iter=input$iter, warmup=0.25*input$iter,
chains=chains, thin=input$thin, duration=NULL)
}
if (input$ModEff_choice=="Nuts")
{
fit_model <- sample_nuts(model=m, path=p, iter=input$iter, warmup=0.25*input$iter,
chains=4, cores=4,control=list(metric='mle', max_treedepth=5),mceval=TRUE)
}
# Capture the printed sampler summary for display in the UI.
fit.mod.summary<-utils::capture.output(summary(fit_model), file=NULL)
output$fit.model.summary <- renderText({
#paste0(fit.mod.summary[1],fit.mod.summary[2],fit.mod.summary[3])
fit.mod.summary
})
# Cap pairs plots at 10 parameters (or fewer if the model has fewer).
parmax<-10
if(length(fit_model$par_names)<10){parmax<-length(fit_model$par_names)}
png(paste0(p,"/pairs_plot_slow.png"),width=600, height=350)
pairs_admb(fit_model, pars=1:parmax, order='slow')
dev.off()
png(paste0(p,"/pairs_plot_fast.png"),width=600, height=350)
pairs_admb(fit_model, pars=1:parmax, order='fast')
dev.off()
# Render the saved pairs plots (slowest- and fastest-mixing parameters).
output$pairs_slow <- renderImage({
#region image.path1<-normalizePath(paste0(p,"/pairs_plot_fast.png"),mustWork=FALSE)
return(list(
src = paste0(p,"/pairs_plot_slow.png"),
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
output$pairs_fast <- renderImage({
#region image.path1<-normalizePath(paste0(p,"/pairs_plot_fast.png"),mustWork=FALSE)
return(list(
src = paste0(p,"/pairs_plot_fast.png"),
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
# Persist the fitted adnuts object for later inspection.
save(fit_model,file=paste0(p,"/fit_model.RData"))
remove_modal_spinner()
#if(input$run_stanout==TRUE){launch_shinyadmb(fit_model)}
})
###########################
### Likelihood profiles ###
###########################
# Reactive returning the model directory chosen for likelihood profiles.
# NOTE(review): shinyDirChoose() is registered inside the reactive, so the
# handler is re-registered on each invalidation; confirm this is intended.
pathLP <- reactive({
shinyDirChoose(input, "LP_dir", roots=roots,session=session, filetypes=c('', 'txt'))
return(parseDirPath(roots, input$LP_dir))
})
# Likelihood-profile tab (tab 4): build the multi-select picker of
# parameters that can be profiled over.
observeEvent(as.numeric(input$tabs)==4,{
  pathLP.dir <- pathLP()
  output$LikeProf_model_picks <- renderUI({
    profile_parms <- c("Steepness","lnR0","Natural mortality female","Linf female","k female", "CV@Lt young female","CV@Lt old female","Natural mortality male","Linf male","k male", "CV@Lt young male", "CV@Lt old male")
    pickerInput(
      inputId = "myPicker_LP",
      label = "Choose parameters to profile over",
      choices = profile_parms,
      options = list(
        `actions-box` = TRUE,
        size = 12,
        `selected-text-format` = "count > 3"
      ),
      multiple = TRUE
    )
  })
})
# Run single-parameter likelihood profiles via the nwfscDiag-style
# run_diagnostics() helper, then render the standard profile figures for
# the first profiled parameter.
observeEvent(input$run_Profiles,{
show_modal_spinner(spin="flower",color=wes_palettes$Darjeeling1[1],text="Profiles running")
starter.file<-SS_readstarter(paste0(pathLP(),"/starter.ss"))
#data.file<-SS_readdat(paste0(pathLP(),"/data_echo.ss_new"))
#ctl.file<-SS_readctl(paste0(pathLP(),"/control.ss_new"),use_datlist = TRUE, datlist=data.file)
# Read the model report (no covariance needed) to get parameter labels.
rep.parms<-SS_output(pathLP(),covar=FALSE,verbose=FALSE)
rep.parms.names<-rownames(rep.parms$parameters)
# SS_parm_names<-c("SR_BH_steep", "SR_LN(R0)","NatM_p_1_Fem_GP_1","L_at_Amax_Fem_GP_1","VonBert_K_Fem_GP_1","CV_young_Fem_GP_1","CV_old_Fem_GP_1","NatM_p_1_Mal_GP_1","L_at_Amax_Mal_GP_1","VonBert_K_Mal_GP_1","CV_young_Mal_GP_1","CV_old_Mal_GP_1")
#SS_parm_names<-c(rownames(ctl.file$SR_parms)[2], rownames(ctl.file$SR_parms)[1],rownames(ctl.file$MG_parms)[1],rownames(ctl.file$MG_parms)[3],rownames(ctl.file$MG_parms)[4],rownames(ctl.file$MG_parms)[5],rownames(ctl.file$MG_parms)[6],rownames(ctl.file$MG_parms)[13],rownames(ctl.file$MG_parms)[15],rownames(ctl.file$MG_parms)[16],rownames(ctl.file$MG_parms)[17],rownames(ctl.file$MG_parms)[18])
# NOTE(review): these hard-coded row positions assume a fixed parameter
# ordering in the report (steepness at 24, lnR0 at 23, etc.); a model
# with a different control-file layout would map labels incorrectly --
# confirm against the target model configuration.
SS_parm_names<-c(rep.parms.names[24], rep.parms.names[23],rep.parms.names[1],rep.parms.names[3],rep.parms.names[4],rep.parms.names[5],rep.parms.names[6],rep.parms.names[13],rep.parms.names[15],rep.parms.names[16],rep.parms.names[17],rep.parms.names[18])
# Map the user's display-name selections to SS parameter labels.
parmnames<-input$myPicker_LP
parmnames_vec<-c("Steepness","lnR0","Natural mortality female","Linf female","k female", "CV@Lt young female","CV@Lt old female","Natural mortality male","Linf male","k male", "CV@Lt young male", "CV@Lt old male")
prof_parms_names<-SS_parm_names[parmnames_vec%in%parmnames]
# Carry the starter file's prior-likelihood switch into the profiles.
prior_like<-starter.file$prior_like
use_prior_like_in<-rep(0,length(prof_parms_names))
if(prior_like==1){use_prior_like_in = rep(1,length(prof_parms_names))}
mydir = dirname(pathLP())
# Profile ranges come from comma-separated UI inputs (low, high, step),
# one entry per selected parameter.
get = get_settings_profile( parameters = prof_parms_names,
low = as.numeric(trimws(unlist(strsplit(input$Prof_Low_val,",")))),
high = as.numeric(trimws(unlist(strsplit(input$Prof_Hi_val,",")))),
step_size = as.numeric(trimws(unlist(strsplit(input$Prof_step,",")))),
param_space = rep('real',length(as.numeric(trimws(unlist(strsplit(input$Prof_Low_val,",")))))),
use_prior_like = use_prior_like_in
)
model_settings = get_settings(settings = list(base_name = basename(pathLP()),
run = "profile",
profile_details = get))
try(run_diagnostics(mydir = mydir, model_settings = model_settings))
# NOTE(review): removes the warning file one level above mydir -- confirm
# that is where run_diagnostics() writes it.
file.remove(paste0(dirname(mydir),"/run_diag_warning.txt"))
# The four figures below show results for the FIRST profiled parameter
# only (prof_parms_names[1]).
output$LikeProf_plot_modout <- renderImage({
image.path1<-normalizePath(file.path(paste0(pathLP(),"_profile_",prof_parms_names[1],"/parameter_panel_",prof_parms_names[1],".png")),mustWork=FALSE)
return(list(
src = image.path1,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
output$LikeProf_plot_Piner <- renderImage({
image.path2<-normalizePath(file.path(paste0(pathLP(),"_profile_",prof_parms_names[1],"/piner_panel_",prof_parms_names[1],".png")),mustWork=FALSE)
return(list(
src = image.path2,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
output$LikeProf_plot_SO <- renderImage({
image.path3<-normalizePath(file.path(paste0(pathLP(),"_profile_",prof_parms_names[1],"/",prof_parms_names[1],"_trajectories_compare1_spawnbio.png")),mustWork=FALSE)
return(list(
src = image.path3,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
output$LikeProf_plot_SOt_SO0 <- renderImage({
image.path4<-normalizePath(file.path(paste0(pathLP(),"_profile_",prof_parms_names[1],"/",prof_parms_names[1],"_trajectories_compare3_Bratio.png")),mustWork=FALSE)
return(list(
src = image.path4,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
remove_modal_spinner()
})
# Run a multi-parameter (joint) likelihood profile: the user uploads a CSV
# of parameter-value combinations; each row is run as one model, and the
# resulting likelihood differences and biomass quantities are tabulated
# and plotted against the reference model.
observeEvent(input$run_MultiProfiles,{
show_modal_spinner(spin="flower",color=wes_palettes$Darjeeling1[2],text="Multi-profiles running")
refdir<-pathLP()
mydir <- dirname(refdir)
#Read in reference model
ref.model<-SS_output(refdir)
#Read in parameter files
# Accept comma- or semicolon-delimited files: peek at the first line and
# re-read with read.csv2 when semicolons are found.
par.df <- fread(input$file_multi_profile$datapath,check.names=FALSE,data.table=FALSE)
L <- readLines(input$file_multi_profile$datapath, n = 1)
if(grepl(";", L)) {par.df <- read.csv2(input$file_multi_profile$datapath,check.names=FALSE)}
# NOTE(review): hard-coded row positions assume a fixed parameter layout;
# also the order c(23:24,...) maps Steepness to row 23 here, whereas the
# single-profile code maps Steepness to row 24 -- one of the two looks
# inconsistent; confirm against the model's parameter table.
SS_parm_names<-rownames(ref.model$parameters)[c(23:24,1,3,4:6,13,15:18)]
parmnames_vec<-c("Steepness","lnR0","Natural mortality female","Linf female","k female", "CV@Lt young female","CV@Lt old female","Natural mortality male","Linf male","k male", "CV@Lt young male", "CV@Lt old male")
parmnames<-colnames(par.df)
prof_parms_names<-SS_parm_names[parmnames_vec%in%parmnames]
# One legend label per row: "parm1 value1;parm2 value2".
modelnames<-paste0(parmnames[1]," ",par.df[,1],";",parmnames[2]," ",par.df[,2])
#Make new folder
#para = rownames(model_settings$profile_details)[aa]
profile_dir <- paste0(refdir,"_profile_", paste(prof_parms_names,collapse="_"))
dir.create(profile_dir, showWarnings = FALSE)
# Clear any leftovers from a previous run of the same profile.
if (length(list.files(profile_dir)) !=0)
{
remove <- list.files(profile_dir)
file.remove(file.path(profile_dir, remove))
}
all_files <- list.files(refdir)
file.copy(from = file.path(refdir,all_files), to = profile_dir, overwrite = TRUE)
#Set-up the starter file control file
starter.file<-SS_readstarter(paste0(profile_dir,"/starter.ss"))
starter.file$ctlfile<-"control_modified.ss"
starter.file$init_values_src<-0
starter.file$prior_like<-1
SS_writestarter(starter.file,profile_dir,overwrite=TRUE)
# low_in <- as.numeric(trimws(unlist(strsplit(input$Prof_Low_val,",")))),
# high_in <- as.numeric(trimws(unlist(strsplit(input$Prof_Hi_val,",")))),
# step_size_in <- as.numeric(trimws(unlist(strsplit(input$Prof_step,","))))
# par.df<-data.frame(mapply(function(x) seq(low[x],high[x],step_size[x]),x=1:length(low)))
# colnames(par.df)<-prof_parms_names
# Run the joint profile with or without the Hessian, per the UI toggle.
# NOTE(review): newer r4ss versions renamed masterctlfile to oldctlfile --
# confirm against the r4ss version this app pins.
if(input$Hess_multi_like==FALSE)
{
profile <- profile(
dir = profile_dir, # directory
masterctlfile = "control.ss_new",
newctlfile = "control_modified.ss",
string = prof_parms_names,
profilevec = par.df,
extras = "-nohess",
prior_check=TRUE,
show_in_console = TRUE
)
}
if(input$Hess_multi_like==TRUE)
{
profile <- profile(
dir = profile_dir, # directory
masterctlfile = "control.ss_new",
newctlfile = "control_modified.ss",
string = prof_parms_names,
profilevec = par.df,
prior_check=TRUE,
show_in_console = TRUE
)
}
# get model output
profilemodels <- SSgetoutput(dirvec=profile_dir,keyvec=1:nrow(par.df), getcovar=FALSE)
n <- length(profilemodels)
profilesummary <- SSsummarize(profilemodels)
# NOTE(review): TRP_multi_like and LRP_multi_like are bare names here;
# they look like they should be input$TRP_multi_like/input$LRP_multi_like
# unless defined elsewhere in the file. If undefined, the try() silently
# swallows the error and no comparison plots are written -- confirm.
try(SSplotComparisons(profilesummary, legendlabels = modelnames, ylimAdj = 1.30, new = FALSE,plot=FALSE,print=TRUE, legendloc = 'topleft',uncertainty=TRUE,plotdir=profile_dir,btarg=TRP_multi_like,minbthresh=LRP_multi_like))
save(profilesummary,file=paste0(profile_dir,"/multiprofile.DMP"))
# add total likelihood (row 1) to table created above
par.df$like <- as.numeric(profilesummary$likelihoods[1, 1:n])
par.df$likediff <- as.numeric(profilesummary$likelihoods[1, 1:n]-ref.model$likelihoods_used[1,1])
par.df$Bratio <- as.numeric(profilesummary$Bratio[grep((profilesummary$endyrs[1]),profilesummary$Bratio$Label), 1:n])
par.df$SB0 <- as.numeric(profilesummary$SpawnBio[1, 1:n])
par.df$SBcurrent <- as.numeric(profilesummary$SpawnBio[grep((profilesummary$endyrs[1]),profilesummary$SpawnBio$Label), 1:n])
SBcurrmax<-max(par.df$SBcurrent)
colnames(par.df)<-c(parmnames,c("Likelihood","Likelihood_difference",paste0("SB",profilesummary$endyrs[1],"/SB0"),"SB0",paste0("SB",profilesummary$endyrs[1])))
save(par.df,file=paste0(profile_dir,"/multiprofilelikelihoods.DMP"))
write.csv(par.df,file=paste0(profile_dir,"/multiprofilelikelihoods.csv"))
#This reactive object is needed to get the plots to work
# Long-format table of the four panel quantities for ggplot faceting.
plot.dat<-reactive({
plot.dat<-melt(par.df,id.vars=c( colnames(par.df)[1:2]),measure.vars=c("Likelihood_difference",paste0("SB",profilesummary$endyrs[1],"/SB0"),"SB0",paste0("SB",profilesummary$endyrs[1])))
plot.dat
})
# blank_data fixes sensible y-limits per facet; refmodel.dat marks the
# reference model's values on each panel.
blank_data<- data.frame(variable = c("Likelihood_difference", "Likelihood_difference", paste0("SB",profilesummary$endyrs[1],"/SB0"), paste0("SB",profilesummary$endyrs[1],"/SB0"), "SB0", "SB0",paste0("SB",profilesummary$endyrs[1]),paste0("SB",profilesummary$endyrs[1])), x =min(par.df[,1]),y = c(min(par.df$Likelihood_difference),max(par.df$Likelihood_difference), 0, 1, 0, ceiling(max(par.df$SB0)),0,ceiling(SBcurrmax)))
blank_data$variable<-factor(blank_data$variable,c("Likelihood_difference",paste0("SB",profilesummary$endyrs[1],"/SB0"),"SB0",paste0("SB",profilesummary$endyrs[1])))
refmodel.dat<-data.frame(variable = c("Likelihood_difference",paste0("SB",profilesummary$endyrs[1],"/SB0"),"SB0",paste0("SB",profilesummary$endyrs[1])), x =ref.model$parameters[grep(prof_parms_names[1],ref.model$parameters$Label),3],y = c(0,ref.model$sprseries$Deplete[grep((profilesummary$endyrs[1]),profilesummary$Bratio$Label)+1],ref.model$SBzero,ref.model$derived_quants[grep((profilesummary$endyrs[1]),profilesummary$SpawnBio$Label),2]))
#multiprofplotfun<-function(plot.dat)
#{
# Faceted multi-profile figure; also saved to disk via ggsave on render.
output$LikeProf_multiplot <- renderPlot({
multiplot<-ggplot(plot.dat(),aes(plot.dat()[,1],value))+
geom_line(lwd=1.25)+
facet_wrap(~variable,scales="free_y")+
geom_blank(data = blank_data, aes(x = x, y = y,z="variable"))+
ylab("Difference in -log likelihood")+
scale_x_continuous(name = paste(parmnames[1],"and",parmnames[2]),
breaks =par.df[,1],
labels = paste0(par.df[,1],"\n",par.df[,2]))+
geom_hline(data = data.frame(yint=c(-1.96,0,1.96,0.4,0.25),variable=c("Likelihood_difference","Likelihood_difference","Likelihood_difference",paste0("SB",profilesummary$endyrs[1],"/SB0"),paste0("SB",profilesummary$endyrs[1],"/SB0"))),
aes(yintercept = yint), linetype = c("solid","dotted","solid","dotted","solid"),color=c("red","black","red","darkgreen","red"),lwd=1)+
geom_point(data=refmodel.dat,aes(x=x,y=y),color="blue",size=4)+
theme_bw()
ggsave(paste0(profile_dir,"/","multilikelihood_profile.png"),width=10,height=10,units="in")
multiplot
})
#}
# output$LikeProf_multiplot <- renderPlot({
# plotPNG(func=multiprofplotfun(plot.dat()),paste0(profile_dir,"/",paste(parmnames,collapse="_"),"_multilikelihood_profile.png"))
# })
# plot.dat2<-reactive({
# plot.dat2<-melt(par.df,id.vars=c( colnames(par.df)[1:2]),measure.vars=c("Likelihood_difference",paste0("SB",profilesummary$endyrs[1]-1,"/SB0"),"SB0",paste0("SB",profilesummary$endyrs[1]-1)))
# plot.dat2
# })
# png(file = paste0(profile_dir,"/","multilikelihood_profile.png"),width = 10, height = 10, units = "in", res = 300, pointsize = pt)
# # multiplot
# ggplot(plot.dat2(),aes(plot.dat2()[,1],value))+
# geom_line(lwd=1.25)+
# facet_wrap(~variable,scales="free_y")+
# #geom_blank(data = blank_data, aes(x = x, y = y,z="variable"))+
# ylab("Difference in -log likelihood")+
# #scale_x_continuous(name = paste(parmnames[1],"and",parmnames[2]),
# # breaks =par.df[,1],
# # labels = paste0(par.df[,1],"\n",par.df[,2]))+
# geom_hline(data = data.frame(yint=c(-1.96,0,1.96,0.4,0.25),variable=c("Likelihood_difference","Likelihood_difference","Likelihood_difference",paste0("SB",profilesummary$endyrs[1]-1,"/SB0"),paste0("SB",profilesummary$endyrs[1]-1,"/SB0"))),
# aes(yintercept = yint), linetype = c("solid","dotted","solid","dotted","solid"),color=c("red","black","red","darkgreen","red"),lwd=1)+
# #geom_point(data=refmodel.dat,aes(x=x,y=y),color="blue",size=3)+
# theme_bw() # multiprofplot
#dev.off()
# png(file = paste0(profile_dir,"/",paste(parmnames,collapse="_"),"_multilikelihood_profile.png"),width = 10, height = 10, units = "in", res = 300, pointsize = pt)
# output$LikeProf_multiplot <- renderImage({
# image.path<-normalizePath(file.path(paste0(profile_dir,paste0("\\",paste(parmnames,collapse="_"),"_multilikelihood_profile.png"))),mustWork=FALSE)
# return(list(
# src = image.path,
# contentType = "image/png",
# # width = 400,
# # height = 300,
# style='height:60vh'))
# },deleteFile=FALSE)
# reshape data frame into a matrix for use with contour
# pngfun(wd = mydir, file = paste0("contour_profile.png"), h = 7,w = 12)
# contour(x = as.numeric(rownames(like_matrix)),
# y = as.numeric(colnames(like_matrix)),
# z = like_matrix)
# dev.off()
# make contour plot
# output$LikeProf_multi_contour <- renderPlot({
# like_matrix <- reshape2::acast(par.df, colnames(par.df)[1]~colnames()[2], value.var="like")
# pngfun(wd = mydir, file = paste0("contour_profile.png"), h = 7,w = 12)
# contour(x = as.numeric(rownames(like_matrix)),
# y = as.numeric(colnames(like_matrix)),
# z = like_matrix)
# dev.off()
# })
remove_modal_spinner()
})
#################
###############################
####### Retrospectives ########
###############################
# Directory chooser for the retrospectives tab.
shinyDirChoose(input,"Retro_dir", roots=roots,session=session, filetypes=c('', 'txt'))
# Reactive returning the base-model directory for retrospective runs.
pathRetro <- reactive({
return(parseDirPath(roots, input$Retro_dir))
})
# Run retrospective analyses (peeling back terminal years over the chosen
# range) via run_diagnostics(), then render the spawning-biomass and
# relative-biomass comparison figures it writes.
observeEvent(input$run_Retro_comps,{
#if(input$run_Retro_comps){
show_modal_spinner(spin="flower",color=wes_palettes$Royal1[1],text="Running retrospectives")
# run_diagnostics() expects the parent directory plus the scenario name.
mydir_in<-dirname(pathRetro())
scenario_in<-basename(pathRetro())
model_settings = get_settings(settings = list(base_name = scenario_in,
run = "retro",
retro_yrs = input$first_retro_year_in:input$final_retro_year_in))
run_diagnostics(mydir = mydir_in, model_settings = model_settings)
# tryCatch({
# run_diagnostics(mydir = mydir_in, model_settings = model_settings)
# },
# warning = function(warn){
# showNotification(paste0(warn), type = 'warning')
# },
# error = function(err){
# showNotification(paste0(err), type = 'err')
# })
#}
# Figures are written by run_diagnostics() into "<model>_retro/".
output$Retro_comp_plotSB <- renderImage({
image.path<-normalizePath(file.path(paste0(pathRetro(),"_retro/compare2_spawnbio_uncertainty.png")),mustWork=FALSE)
return(list(
src = image.path,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
output$Retro_comp_plotBratio <- renderImage({
image.path<-normalizePath(file.path(paste0(pathRetro(),"_retro/compare4_Bratio_uncertainty.png")),mustWork=FALSE)
return(list(
src = image.path,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
remove_modal_spinner()
})
##############################
###############################
### Sensitivity comparisons ###
###############################
# Reactive returning the directory containing the candidate model folders
# for sensitivity comparisons.
# NOTE(review): shinyDirChoose() is registered inside the reactive, so the
# handler is re-registered on each invalidation; confirm this is intended.
pathSensi <- reactive({
shinyDirChoose(input, "Sensi_dir", roots=roots,session=session, filetypes=c('', 'txt'))
return(parseDirPath(roots, input$Sensi_dir))
})
# Sensitivity tab (tab 6): build the single-choice picker for the
# reference model from the folders in the chosen sensitivity directory.
observeEvent(as.numeric(input$tabs)==6,{
  output$Sensi_model_Ref <- renderUI({
    pickerInput(
      inputId = "myPicker_Ref",
      label = "Choose reference model",
      choices = list.files(pathSensi()),
      options = list(
        `actions-box` = TRUE,
        size = 12,
        `selected-text-format` = "count > 3"
      ),
      multiple = FALSE
    )
  })
})
# Once a reference model has been picked, build the multi-select picker of
# sensitivity scenarios to compare against it.
observeEvent(!is.null(input$myPicker_Ref),{
  output$Sensi_model_picks <- renderUI({
    pickerInput(
      inputId = "myPicker",
      label = "Choose scenarios to compare to reference model",
      choices = list.files(pathSensi()),
      options = list(
        `actions-box` = TRUE,
        size = 12,
        `selected-text-format` = "count > 3"
      ),
      multiple = TRUE
    )
  })
})
#SS.comparisons<-observeEvent(as.numeric(input$tabs)==5,{
#SS.comparisons<-observeEvent(as.numeric(input$tabs)==5,{
# Assemble the vector of model directories to compare (reference model
# first, then the chosen sensitivity scenarios) once a comparison run is
# requested on tab 6; also ensures the output plot folder exists.
Sensi_model_dir_out<-eventReactive(req(input$run_Sensi_comps&!is.null(input$myPicker)&as.numeric(input$tabs)==6),{
# dir.exists() rather than file.exists(): the target is a directory.
if(!dir.exists(paste0(pathSensi(),"/Sensitivity Comparison Plots")))
{
dir.create(paste0(pathSensi(),"/Sensitivity Comparison Plots"))
}
Sensi_model_dir_out_Ref<-paste0(pathSensi(),"/",input$myPicker_Ref)
Sensi_model_dir_sensi<-paste0(pathSensi(),"/",input$myPicker)
Sensi_model_dir<-c(Sensi_model_dir_out_Ref,Sensi_model_dir_sensi)
Sensi_model_dir
})
#&exists(Sensi_model_dir_out())
observeEvent(req(input$run_Sensi_comps),{
show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[1],text="Comparisons running")
modelnames<-c(input$myPicker_Ref,input$myPicker)
zz<-list()
Runs<-length(Sensi_model_dir_out())
for(i in 1:Runs) {zz[[i]]<-SS_output(paste0(Sensi_model_dir_out()[i]))}
modsummary.sensi<- SSsummarize(zz)
col.vec = rc(n=length(modelnames), alpha = 1)
shade = adjustcolor(col.vec[1], alpha.f = 0.10)
TRP.in<-input$Sensi_TRP
LRP.in<-input$Sensi_LRP
if(is.na(TRP.in)){TRP.in<-0}
if(is.na(LRP.in)){LRP.in<-0}
dir.create(paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file))
#Sensi_uncertainty_choice<-input$Sensi_uncertainty_choice
#if (all(is.na(quantsSD[, i]) | quantsSD[, i] == 0))
Sensi_uncertainty_choice<-TRUE
pngfun(wd = paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file), file = paste0(input$Sensi_comp_file,".png"), h = 7,w = 12)
par(mfrow = c(1,3))
try(SSplotComparisons(modsummary.sensi, legendlabels = modelnames, ylimAdj = 1.30, subplot = c(2,4),col = col.vec, new = FALSE,btarg=TRP.in,minbthresh=LRP.in,uncertainty=Sensi_uncertainty_choice))
try(SSplotComparisons(modsummary.sensi, legendlabels = modelnames, ylimAdj = 1.30, subplot = 11,col = col.vec, new = FALSE, legendloc = 'topleft',btarg=TRP.in,minbthresh=LRP.in,uncertainty=Sensi_uncertainty_choice))
dev.off()
try(SSplotComparisons(modsummary.sensi, legendlabels = modelnames, ylimAdj = 1.30,col = col.vec, new = FALSE,print=TRUE, legendloc = 'topleft',btarg=TRP.in,minbthresh=LRP.in,uncertainty=Sensi_uncertainty_choice,plotdir=paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file)))
save(modsummary.sensi,file=paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/",input$Sensi_comp_file,".DMP"))
pngfun(wd = paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file), file = paste0(input$Sensi_comp_file,"_no_uncertainty.png"), h = 7,w = 12)
par(mfrow = c(1,3))
try(SSplotComparisons(modsummary.sensi, legendlabels = modelnames, ylimAdj = 1.30, subplot = c(1,3),col = col.vec, new = FALSE,btarg=TRP.in,minbthresh=LRP.in,uncertainty=Sensi_uncertainty_choice))
try(SSplotComparisons(modsummary.sensi, legendlabels = modelnames, ylimAdj = 1.30, subplot = 11,col = col.vec, new = FALSE, legendloc = 'topleft',btarg=TRP.in,minbthresh=LRP.in,uncertainty=Sensi_uncertainty_choice))
dev.off()
output$Sensi_comp_plot <- renderImage({
if (all(is.na(modsummary.sensi$quantsSD[, 1]) | modsummary.sensi$quantsSD[, 1] == 0))
{
image.path<-normalizePath(file.path(paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/",input$Sensi_comp_file, '_no_uncertainty.png')),mustWork=FALSE)
return(list(
src = image.path,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
}
else
{
image.path<-normalizePath(file.path(paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/",input$Sensi_comp_file, '.png')),mustWork=FALSE)
return(list(
src = image.path,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
}
},deleteFile=FALSE)
#Relative error sensitivity plots: parse the comma-separated UI inputs into
#vectors used to annotate/position the r4ss sensitivity figures.
SensiRE_breaks_in<-as.numeric(trimws(unlist(strsplit(input$SensiRE_breaks,","))))
SensiRE_xcenter_in<-as.numeric(trimws(unlist(strsplit(input$SensiRE_xcenter,","))))
SensiRE_ycenter_in<-as.numeric(trimws(unlist(strsplit(input$SensiRE_ycenter,","))))
SensiRE_headers_in<-trimws(unlist(strsplit(input$SensiRE_headers,",")))
#Same y-limits recycled for each of the five relative-error panels
yminmax_sensi<-rep(c(input$SensiRE_ymin,input$SensiRE_ymax),5)
r4ss::SS_Sensi_plot(dir=paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/"),
model.summaries=modsummary.sensi,
current.year=modsummary.sensi$endyrs[1]+1,
mod.names=modelnames, #List the names of the sensitivity runs
#likelihood.out=c(0,0,0),
Sensi.RE.out="Sensi_RE_out.DMP", #Saved file of relative errors
CI=0.95, #Confidence interval box based on the reference model
TRP.in=input$Sensi_TRP, #Target relative abundance value
LRP.in=input$Sensi_LRP, #Limit relative abundance value
sensi_xlab="Sensitivity scenarios", #X-axis label
ylims.in=yminmax_sensi, #Y-axis limits (min/max recycled per panel)
plot.figs=c(1,1,1,1,1,1), #Which plots to make/save?
sensi.type.breaks=SensiRE_breaks_in, #vertical breaks that can separate out types of sensitivities
anno.x=SensiRE_xcenter_in, # Horizontal (x) positioning of the sensitivity types labels
anno.y=SensiRE_ycenter_in, # Vertical (y) positioning of the sensitivity types labels
anno.lab=SensiRE_headers_in #Sensitivity types labels
)
#Relative-error comparison figure (SB, depletion, F, MSY panels)
output$SensiRE_comp_plot <- renderImage({
image.path<-normalizePath(file.path(paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/Sensi_REplot_SB_Dep_F_MSY.png")),mustWork=FALSE)
return(list(
src = image.path,
contentType = "image/png",
width = 800,
height = 1200,
style='height:60vh'))
},deleteFile=FALSE)
#Log-scale version of the relative-error comparison figure
output$SensiRElog_comp_plot <- renderImage({
image.path<-normalizePath(file.path(paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/Sensi_logREplot_SB_Dep_F_MSY.png")),mustWork=FALSE)
return(list(
src = image.path,
contentType = "image/png",
width = 400,
height = 300,
style='height:60vh'))
},deleteFile=FALSE)
remove_modal_spinner()
})
#############################
#############################
# image.path<-eventReactive(exists(file.path(paste0(path1(),"/Sensitivity Comparison Plots/",
# input$Sensi_comp_file, '.png'))),{
# image.path<-normalizePath(file.path(paste0(path1(),"/Sensitivity Comparison Plots/",
# input$Sensi_comp_file, '.png')),mustWork=FALSE)
# })
# output$Sensi_comp_plot <- renderImage({
# image.path<-normalizePath(file.path(paste0(path1(),"/Sensitivity Comparison Plots/",
# input$Sensi_comp_file, '.png')),mustWork=FALSE)
# return(list(
# src = image.path,
# contentType = "image/png",
# # width = 400,
# # height = 300,
# style='height:60vh'))
# print(input$run_Sensi_comps[1])
# },deleteFile=FALSE)
####################################
##########################
### Ensemble modelling ###
##########################
#Directory chooser for ensemble model folders; returns the chosen path.
#NOTE(review): registering shinyDirChoose inside the reactive appears
#intentional (keeps the chooser wired to "Ensemble_dir") — confirm.
pathEnsemble <- reactive({
shinyDirChoose(input, "Ensemble_dir", roots=roots, filetypes=c('', 'txt'))
return(parseDirPath(roots, input$Ensemble_dir))
})
#Used to have as.numeric(input$tabs)==4
#When the ensemble tab (tab 7) is visited, populate the scenario picker
#with the model folders found in the chosen ensemble directory.
observeEvent(as.numeric(input$tabs)==7,{
output$Ensemble_model_picks<-renderUI({
pickerInput(
inputId = "myEnsemble",
label = "Choose scenarios to ensemble",
choices = list.files(pathEnsemble()),
options = list(
`actions-box` = TRUE,
size = 12,
`selected-text-format` = "count > 3"
),
multiple = TRUE
)
})
})
#Ensemble_model_dir_out<-eventReactive(req(input$run_Ensemble&!is.null(input$myEnsemble)&as.numeric(input$tabs)==6),{
#Ensemble modelling: sample each chosen model's derived quantities, combine
#the draws using the user-supplied weights, plot the comparisons, and save
#the resulting objects to the ensemble output folder.
observeEvent(req(input$run_Ensemble&!is.null(input$myEnsemble)&as.numeric(input$tabs)==7),{
show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[1],text="Prepare models to combine into ensembles")
#Create the output folder for this ensemble run
if(!file.exists(paste0(pathEnsemble(),"/Ensemble outputs")))
{
dir.create(paste0(pathEnsemble(),"/Ensemble outputs"))
}
Ensemble_model_dir_out<-paste0(pathEnsemble(),"/Ensemble outputs/",input$Ensemble_file)
dir.create(Ensemble_model_dir_out,showWarnings=FALSE) #may already exist from a previous run
#Read and summarize each selected scenario
modelnames<-input$myEnsemble
zz<-list()
Runs<-length(input$myEnsemble)
for(i in seq_len(Runs)) {zz[[i]]<-SS_output(paste0(pathEnsemble(),"/",input$myEnsemble[i]))}
modsummary.ensemble<- SSsummarize(zz)
#Standardize the user weights and convert to per-model sample sizes
Ensemble_wts<-as.numeric(trimws(unlist(strsplit(input$Ensemble_wts,","))))
Stand_ensemble_wts<-Ensemble_wts/sum(Ensemble_wts)
Nsamps_ensemble<-10000
Nsamps_ensemble_wts<-round(Nsamps_ensemble*Stand_ensemble_wts)
#Draw numdraws normal samples around each estimate (mean x, sd y)
mean.fxn <- function(x, y) rnorm(numdraws, mean = x, sd = y)
SpOt_en<-Bratio_en<-F_en<-SPR_en<-list()
SO_0<-SO_t<-Bratio_t<-F_t<-SPR_t<-data.frame(Year=NA,Metric=NA,Model=NA)
#Create weighted draws: one list of per-year sample vectors per model
for (i in seq_along(Nsamps_ensemble_wts))
{
numdraws<-Nsamps_ensemble_wts[i]
#Spawning output (initial and terminal year summaries)
SpOt_en[[i]]<-Map(mean.fxn,modsummary.ensemble$SpawnBio[,i],modsummary.ensemble$SpawnBioSD[,i])
names(SpOt_en[[i]])<-modsummary.ensemble$SpawnBio$Yr
SO_0<-rbind(SO_0,data.frame(Year=as.numeric(names(SpOt_en[[i]][1])),Metric=unlist(SpOt_en[[i]][1]),Model=input$myEnsemble[i]))
#BUG FIX: the terminal-year metric was previously indexed by the number of
#models (length(Nsamps_ensemble_wts)) rather than the terminal year
SO_t<-rbind(SO_t,data.frame(Year=names(SpOt_en[[i]][nrow(modsummary.ensemble$SpawnBio)]),Metric=unlist(SpOt_en[[i]][nrow(modsummary.ensemble$SpawnBio)]),Model=input$myEnsemble[i]))
#Relative stock status (terminal year)
Bratio_en[[i]]<-Map(mean.fxn,modsummary.ensemble$Bratio[,i],modsummary.ensemble$BratioSD[,i])
names(Bratio_en[[i]])<-modsummary.ensemble$Bratio$Yr
Bratio_t<-rbind(Bratio_t,data.frame(Year=names(Bratio_en[[i]][nrow(modsummary.ensemble$Bratio)]),Metric=unlist(Bratio_en[[i]][nrow(modsummary.ensemble$Bratio)]),Model=input$myEnsemble[i]))
#Fishing mortality (terminal year)
F_en[[i]]<-Map(mean.fxn,modsummary.ensemble$Fvalue[,i],modsummary.ensemble$FvalueSD[,i])
names(F_en[[i]])<-modsummary.ensemble$Fvalue$Yr
F_t<-rbind(F_t,data.frame(Year=names(F_en[[i]][nrow(modsummary.ensemble$Fvalue)]),Metric=unlist(F_en[[i]][nrow(modsummary.ensemble$Fvalue)]),Model=input$myEnsemble[i]))
#1-SPR (terminal year)
SPR_en[[i]]<-Map(mean.fxn,modsummary.ensemble$SPRratio[,i],modsummary.ensemble$SPRratioSD[,i])
names(SPR_en[[i]])<-modsummary.ensemble$SPRratio$Yr
SPR_t<-rbind(SPR_t,data.frame(Year=names(SPR_en[[i]][nrow(modsummary.ensemble$SPRratio)]),Metric=unlist(SPR_en[[i]][nrow(modsummary.ensemble$SPRratio)]),Model=input$myEnsemble[i]))
}
#Assemble the ensembles: concatenate every model's draws year by year.
#BUG FIX: keep the running objects as per-year lists (SIMPLIFY=FALSE); the
#original let mapply() simplify to a matrix after the second model, which
#scrambled the concatenation whenever three or more models were combined.
Ensemble_SO<-SpOt_en[[1]]
Ensemble_Bratio<-Bratio_en[[1]]
Ensemble_F<-F_en[[1]]
Ensemble_SPR<-SPR_en[[1]]
if(length(Nsamps_ensemble_wts)>1) #guard: 2:1 would loop backwards for a single model
{
for(ii in 2:length(Nsamps_ensemble_wts))
{
Ensemble_SO<-mapply(c,Ensemble_SO,SpOt_en[[ii]],SIMPLIFY=FALSE)
Ensemble_Bratio<-mapply(c,Ensemble_Bratio,Bratio_en[[ii]],SIMPLIFY=FALSE)
Ensemble_F<-mapply(c,Ensemble_F,F_en[[ii]],SIMPLIFY=FALSE)
Ensemble_SPR<-mapply(c,Ensemble_SPR,SPR_en[[ii]],SIMPLIFY=FALSE)
}
}
#Convert to draws x years matrices (column names are years)
Ensemble_SO<-do.call(cbind,Ensemble_SO)
Ensemble_Bratio<-do.call(cbind,Ensemble_Bratio)
Ensemble_F<-do.call(cbind,Ensemble_F)
Ensemble_SPR<-do.call(cbind,Ensemble_SPR)
#Add the combined ensemble as an extra "Model" and drop the NA seed row
SO_0<-rbind(SO_0[-1,],data.frame(Year=as.numeric(colnames(Ensemble_SO)[1]),Metric=Ensemble_SO[,1],Model="Ensemble"))
SO_t<-rbind(SO_t[-1,],data.frame(Year=as.numeric(colnames(Ensemble_SO)[ncol(Ensemble_SO)]),Metric=Ensemble_SO[,ncol(Ensemble_SO)],Model="Ensemble"))
Bratio_t<-rbind(Bratio_t[-1,],data.frame(Year=as.numeric(colnames(Ensemble_Bratio)[ncol(Ensemble_Bratio)]),Metric=Ensemble_Bratio[,ncol(Ensemble_Bratio)],Model="Ensemble"))
F_t<-rbind(F_t[-1,],data.frame(Year=as.numeric(colnames(Ensemble_F)[ncol(Ensemble_F)]),Metric=Ensemble_F[,ncol(Ensemble_F)],Model="Ensemble"))
SPR_t<-rbind(SPR_t[-1,],data.frame(Year=as.numeric(colnames(Ensemble_SPR)[ncol(Ensemble_SPR)]),Metric=Ensemble_SPR[,ncol(Ensemble_SPR)],Model="Ensemble"))
SO_0$Year<-as.factor(SO_0$Year)
SO_t$Year<-as.factor(SO_t$Year)
Bratio_t$Year<-as.factor(Bratio_t$Year)
F_t$Year<-as.factor(F_t$Year)
SPR_t$Year<-as.factor(SPR_t$Year)
show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[2],text="Preparing ensemble plots")
#Violin plots comparing each model with the combined ensemble
gg1<-ggplot(SO_0,aes(Model,Metric))+geom_violin()+ylab("Initial Spawning Output")
gg2<-ggplot(SO_t,aes(Model,Metric))+geom_violin()+ylab("Terminal Year Spawning Output")
gg3<-ggplot(Bratio_t,aes(Model,Metric))+geom_violin()+ylab("Relative stock status")
gg4<-ggplot(F_t,aes(Model,Metric))+geom_violin()+ylab("Fishing mortality")
gg5<-ggplot(SPR_t,aes(Model,Metric))+geom_violin()+ylab("1-SPR")
#Pass each plot to ggsave explicitly: ggsave's default last_plot() is not
#reliable inside a reactive where plots are never printed
comp.plots<-ggarrange(gg1,gg2,gg3,gg4,gg5)
ggsave(paste0(Ensemble_model_dir_out,"/Ensemble_comp_plots.png"),plot=comp.plots)
output$Ensemble_plots <- renderPlot({
ggarrange(gg1,gg2,gg3,gg4,gg5)})
#Spawning Output time series
Ensemble_SO_plot<-reshape2::melt(Ensemble_SO,value.name="SO")
colnames(Ensemble_SO_plot)[2]<-"Year"
Ensemble_SO_plot$Year<-as.factor(Ensemble_SO_plot$Year)
gg.so<-ggplot(Ensemble_SO_plot,aes(Year,SO,fill=Year))+
geom_violin()+
theme(legend.position="none")+
theme(axis.text.x = element_text(angle = 45, hjust = 1,vjust=0.5,size=10))+
ylab("Spawning Output")
ggsave(paste0(Ensemble_model_dir_out,"/Ensemble_SO.png"),plot=gg.so)
#Relative stock status time series
Ensemble_Bratio_plot<-reshape2::melt(Ensemble_Bratio,value.name="Bratio")
colnames(Ensemble_Bratio_plot)[2]<-"Year"
Ensemble_Bratio_plot$Year<-as.factor(Ensemble_Bratio_plot$Year)
gg.bratio<-ggplot(Ensemble_Bratio_plot,aes(Year,Bratio,fill=Year))+
geom_violin()+
theme(legend.position="none")+
theme(axis.text.x = element_text(angle = 45, hjust = 1,vjust=0.5,size=10))+
ylab("SBt/SO0")
ggsave(paste0(Ensemble_model_dir_out,"/Ensemble_Bratio.png"),plot=gg.bratio)
#Fishing mortality time series
Ensemble_F_plot<-reshape2::melt(Ensemble_F,value.name="F")
colnames(Ensemble_F_plot)[2]<-"Year"
Ensemble_F_plot$Year<-as.factor(Ensemble_F_plot$Year)
gg.f<-ggplot(Ensemble_F_plot,aes(Year,F,fill=Year))+
geom_violin()+
theme(legend.position="none")+
theme(axis.text.x = element_text(angle = 45, hjust = 1,vjust=0.5,size=10))+
ylab("Fishing mortality")
ggsave(paste0(Ensemble_model_dir_out,"/Ensemble_F.png"),plot=gg.f)
#1-SPR time series
#BUG FIX: this previously melted Ensemble_SO, so the saved 1-SPR figure
#actually showed spawning-output draws
Ensemble_SPR_plot<-reshape2::melt(Ensemble_SPR,value.name="SPR")
colnames(Ensemble_SPR_plot)[2]<-"Year"
Ensemble_SPR_plot$Year<-as.factor(Ensemble_SPR_plot$Year)
gg.spr<-ggplot(Ensemble_SPR_plot,aes(Year,SPR,fill=Year))+
geom_violin()+
theme(legend.position="none")+
theme(axis.text.x = element_text(angle = 45, hjust = 1,vjust=0.5,size=10))+
ylab("1-SPR")
ggsave(paste0(Ensemble_model_dir_out,"/Ensemble_SPR.png"),plot=gg.spr)
#Save the per-model draws, the combined ensembles, and the plotting frames
show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[3],text="Saving ensemble objects")
Model.outputs<-list("Spawning Output"=SpOt_en,"Relative Stock Status"=Bratio_en,"Fishing mortality"=F_en,"1-SPR"=SPR_en)
Ensemble.outputs<-list("Spawning Output"=Ensemble_SO,"Relative Stock Status"=Ensemble_Bratio,"Fishing mortality"=Ensemble_F,"1-SPR"=Ensemble_SPR)
Ensemble.outputs.plots<-list("Spawning Output"=Ensemble_SO_plot,"Relative Stock Status"=Ensemble_Bratio_plot,"Fishing mortality"=Ensemble_F_plot,"1-SPR"=Ensemble_SPR_plot)
save(Model.outputs,file=paste0(Ensemble_model_dir_out,"/Model_results",".DMP"))
save(Ensemble.outputs,file=paste0(Ensemble_model_dir_out,"/Ensemble_results",".DMP"))
save(Ensemble.outputs.plots,file=paste0(Ensemble_model_dir_out,"/Ensemble_results_plots",".DMP"))
remove_modal_spinner()
})
#})
#observeEvent(req(input$run_Ensemble&exists("Ensemble.outputs()")),{
#
# })
#Create figures of weighted values
# output$Sensi_comp_plot <- renderImage({
# image.path<-normalizePath(file.path(paste0(path1(),"/Sensitivity Comparison Plots/",
# input$Sensi_comp_file, '.png')),mustWork=FALSE)
# return(list(
# src = image.path,
# contentType = "image/png",
# # width = 400,
# # height = 300,
# style='height:60vh'))
# },deleteFile=FALSE)
})
| /.history/server_20230517153602.r | no_license | shcaba/SS-DL-tool | R | false | false | 237,705 | r | require(shiny)
require(shinyjs)
require(r4ss)
require(plyr)
require(dplyr)
require(ggplot2)
require(reshape2)
require(data.table)
require(tidyr)
require(rlist)
require(viridis)
require(sss)
require(shinyWidgets)
require(shinyFiles)
require(HandyCode)
require(nwfscDiag)
require(shinybusy)
require(truncnorm)
require(flextable)
require(officer)
require(gridExtra)
require(ggpubr)
require(grid)
require(wesanderson)
require(adnuts)
require(shinystan)
#require(paletteer)
#require(RColorBrewer)
#require(ggthemes)
#devtools::load_all("C:/Users/Jason.Cope/Documents/Github/nwfscDiag")
source('Functions.r',local = FALSE)
#Light ggplot2 theme for report figures: no grid lines, plain facet strips
#with black text, framed panels, and transparent legend keys/background.
theme_report <- function(base_size = 11) {
half <- base_size / 2
overrides <- theme(
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.ticks.length = unit(half / 2.2, "pt"),
strip.background = element_rect(fill = NA, colour = NA),
strip.text.x = element_text(colour = "black"),
strip.text.y = element_text(colour = "black"),
panel.border = element_rect(fill = NA),
legend.key.size = unit(0.9, "lines"),
legend.key = element_rect(colour = NA, fill = NA),
legend.background = element_rect(colour = NA, fill = NA)
)
theme_light(base_size = base_size) + overrides
}
theme_set(theme_report())
shinyServer(function(input, output,session) {
useShinyjs()
#Light ggplot2 theme for report figures (duplicate of the top-level
#theme_report defined above shinyServer; this copy shadows it inside the
#server function).
theme_report <- function(base_size = 11) {
half_line <- base_size/2
theme_light(base_size = base_size) +
theme(
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.ticks.length = unit(half_line / 2.2, "pt"),
strip.background = element_rect(fill = NA, colour = NA),
strip.text.x = element_text(colour = "black"),
strip.text.y = element_text(colour = "black"),
panel.border = element_rect(fill = NA),
legend.key.size = unit(0.9, "lines"),
legend.key = element_rect(colour = NA, fill = NA),
legend.background = element_rect(colour = NA, fill = NA)
)
}
theme_set(theme_report())
#################
### FUNCTIONS ###
#################
#von Bertalanffy growth function: expected length at each age given
#asymptotic length Linf, growth rate k, and theoretical age at length 0 (t0).
VBGF <- function(Linf, k, t0, ages) {
pred.len <- Linf * (1 - exp(-k * (ages - t0)))
pred.len
}
#Inverse von Bertalanffy growth function: age corresponding to length lt.
VBGF.age <- function(Linf, k, t0, lt) {
t0 - log(1 - lt / Linf) / k
}
#Run the Stock Synthesis executable located in 'path'.
# path: folder containing the SS executable and its input files
# ss.cmd: extra command-line flags passed to SS
# OS.in: "Windows" (runs the 'ss' exe via run(), presumably r4ss::run —
# confirm), "Mac" (bundled ss_osx binary) or "Linux" (ss_linux binary)
RUN.SS<-function(path,ss.cmd=" -nohess -nox",OS.in="Windows"){
#NOTE(review): only used by the commented-out shell() approach below
navigate <- paste("cd ", path, sep="")
if(OS.in=="Windows")
{
#command <- paste0(navigate," & ", "ss", ss.cmd)
#shell(command, invisible=TRUE, translate=TRUE)
run(path,exe="ss",extras=ss.cmd,skipfinished=FALSE,show_in_console = TRUE)
}
if(OS.in=="Mac")
{
#Make the bundled binary executable, then run it inside 'path'
command <- c(paste("cd", path), "chmod +x ./ss_osx",paste("./ss_osx", ss.cmd))
system(paste(command, collapse=";"),invisible=TRUE)
#command <- paste0(path,"/./ss_mac", ss.cmd)
#system(command, invisible=TRUE)
}
if(OS.in=="Linux")
{
command <- c(paste("cd", path), "chmod +x ./ss_linux",paste("./ss_linux", ss.cmd))
system(paste(command, collapse=";"), invisible=TRUE)
}
}
#Open a PNG graphics device at wd/file (300 dpi, sizes in inches).
#The caller is responsible for closing the device with dev.off().
pngfun <- function(wd, file, w = 7, h = 7, pt = 12) {
full.path <- file.path(wd, file)
cat('writing PNG to', full.path, '\n')
png(filename = full.path, width = w, height = h, units = 'in', res = 300, pointsize = pt)
}
#A subset of rich.colors by Arni Magnusson from the gregmisc package
#(a.k.a. rich.colors.short): returns n hex colour strings along a "rich"
#gradient, with optional alpha transparency.
rc <- function(n, alpha = 1) {
x <- seq(0, 1, length.out = n) #spell out length.out (was partial-matched 'length')
r <- 1/(1 + exp(20 - 35 * x))
g <- pmin(pmax(0, -0.8 + 6 * x - 5 * x^2), 1)
b <- dnorm(x, 0.25, 0.15)/max(dnorm(x, 0.25, 0.15))
rgb.m <- matrix(c(r, g, b), ncol = 3)
#Return the vector explicitly: the original ended in an assignment, whose
#value is returned invisibly
apply(rgb.m, 1, function(v) rgb(v[1], v[2], v[3], alpha = alpha))
}
#Stock Synthesis double-normal (pattern 24) selectivity curve built from
#intuitive inputs, converted internally to the six SS parameters (a-f):
# Sel50: length at 50% selectivity on the ascending limb
# Selpeak: length at which selectivity first reaches 1 (the peak)
# PeakDesc: controls where the descending limb begins (via parameter b)
# LtPeakFinal: spread (variance) of the descending limb
# FinalSel: selectivity at the final length bin, on a 0-1 scale
#Returns a 2-column matrix: length bins (x) and selectivity (sel).
doubleNorm24.sel <- function(Sel50,Selpeak,PeakDesc,LtPeakFinal,FinalSel) {
#UPDATED: - input e and f on 0 to 1 scale and transform to logit scale
# - changed bin width in peak2 calculation
# - updated index of sel when j2 < length(x)
# - renamed input parameters, cannot have same names as the logistic function
# - function not handling f < -1000 correctly
x<-seq(1,Selpeak+Selpeak,1)
bin_width <- x[2] - x[1]
#Translate the intuitive inputs into the SS double-normal parameters
a<- Selpeak
b<- -log((max(x)-Selpeak-bin_width)/(PeakDesc-Selpeak-bin_width))
c<- log(-((Sel50-Selpeak)^2/log(0.5)))
d<- log(LtPeakFinal)
e<- -15
f<- -log((1/(FinalSel+0.000000001)-1))
sel <- rep(NA, length(x))
startbin <- 1
peak <- a
upselex <- exp(c)
downselex <- exp(d)
final <- f
if (e < -1000) {
j1 <- -1001 - round(e)
sel[1:j1] <- 1e-06
}
if (e >= -1000) {
j1 <- startbin - 1
if (e > -999) {
point1 <- 1/(1 + exp(-e))
t1min <- exp(-(x[startbin] - peak)^2/upselex)
}
}
if (f < -1000)
j2 <- -1000 - round(f)
if (f >= -1000)
j2 <- length(x)
#Length at which the descending limb starts
peak2 <- peak + bin_width + (0.99 * x[j2] - peak - bin_width)/(1 +
exp(-b))
if (f > -999) {
point2 <- 1/(1 + exp(-final))
t2min <- exp(-(x[j2] - peak2)^2/downselex)
}
t1 <- x - peak
t2 <- x - peak2
#Smooth joins between the ascending limb, the plateau and the descent
join1 <- 1/(1 + exp(-(20/(1 + abs(t1))) * t1))
join2 <- 1/(1 + exp(-(20/(1 + abs(t2))) * t2))
if (e > -999)
asc <- point1 + (1 - point1) * (exp(-t1^2/upselex) -
t1min)/(1 - t1min)
if (e <= -999)
asc <- exp(-t1^2/upselex)
if (f > -999)
dsc <- 1 + (point2 - 1) * (exp(-t2^2/downselex) -
1)/(t2min - 1)
if (f <= -999)
dsc <- exp(-(t2)^2/downselex)
idx.seq <- (j1 + 1):j2
sel[idx.seq] <- asc[idx.seq] * (1 - join1[idx.seq]) + join1[idx.seq] * (1 -
join2[idx.seq] + dsc[idx.seq] * join2[idx.seq])
if (startbin > 1 && e >= -1000) {
sel[1:startbin] <- (x[1:startbin]/x[startbin])^2 *
sel[startbin]
}
if (j2 < length(x))
sel[(j2 + 1):length(x)] <- sel[j2]
return(cbind(x,sel))
}
########## Clear data files and plots ############
#Reactive containers for each uploaded data file; the 'clear' flag stops
#the file from being re-read after its reset button has been pressed.
rv.Lt <- reactiveValues(data = NULL,clear = FALSE)
rv.Age <- reactiveValues(data = NULL,clear = FALSE)
rv.Ct <- reactiveValues(data = NULL,clear = FALSE)
rv.Index <- reactiveValues(data = NULL,clear = FALSE)
rv.AgeErr <- reactiveValues(data = NULL,clear = FALSE)
########
#Reset catches
#Read the uploaded catch file (file2) unless the clear flag is set
observe({
req(input$file2)
req(!rv.Ct$clear)
rv.Ct$data <- fread(input$file2$datapath,check.names=FALSE,data.table=FALSE)
#L <- readLines(input$file2$datapath, n = 1)
#if(grepl(";", L)) {rv.Ct$data <- read.csv2(input$file2$datapath,check.names=FALSE)}
})
#A new upload re-enables reading (priority 1000 runs before the reader)
observeEvent(input$file2, {
rv.Ct$clear <- FALSE
}, priority = 1000)
#Reset button: drop the data and block re-reads until a new upload
observeEvent(input$reset_ct, {
rv.Ct$data <- NULL
rv.Ct$clear <- TRUE
reset('file2')
}, priority = 1000)
#Reset lengths
#Read the uploaded length-composition file (file1) unless the clear flag is set
observe({
req(input$file1)
req(!rv.Lt$clear)
rv.Lt$data <- fread(input$file1$datapath,check.names=FALSE,data.table=FALSE)
#L <- readLines(input$file1$datapath, n = 1)
#rv.Lt$data <- read.csv(input$file1$datapath,check.names=FALSE)
#if(grepl(";", L)) {rv.Lt$data <- read.csv2(input$file1$datapath,check.names=FALSE)}
})
#A new upload re-enables reading
observeEvent(input$file1, {
rv.Lt$clear <- FALSE
}, priority = 1000)
#Reset button: drop the data and block re-reads until a new upload
observeEvent(input$reset_lt, {
rv.Lt$data <- NULL
rv.Lt$clear <- TRUE
reset('file1')
}, priority = 1000)
#Reset ages
#Read the uploaded age-composition file (file3) unless the clear flag is set
observe({
req(input$file3)
req(!rv.Age$clear)
rv.Age$data <- fread(input$file3$datapath,check.names=FALSE,data.table=FALSE)
#L <- readLines(input$file3$datapath, n = 1)
#if(grepl(";", L)) {rv.Age$data <- read.csv2(input$file3$datapath,check.names=FALSE)}
})
#A new upload re-enables reading
observeEvent(input$file3, {
rv.Age$clear <- FALSE
}, priority = 1000)
#Reset button: drop the data and block re-reads until a new upload
observeEvent(input$reset_age, {
rv.Age$data <- NULL
rv.Age$clear <- TRUE
reset('file3')
}, priority = 1000)
#Reset ageing error
#Read the uploaded ageing-error file (file33, no header) unless the clear
#flag is set
observe({
req(input$file33)
req(!rv.AgeErr$clear)
rv.AgeErr$data <- fread(input$file33$datapath,check.names=FALSE,header=FALSE,data.table=FALSE)
#L <- readLines(input$file33$datapath, n = 1)
#if(grepl(";", L)) {rv.AgeErr$data <- read.csv2(input$file33$datapath,check.names=FALSE,header=FALSE)}
})
#A new upload re-enables reading; if the ageing-error option is switched
#off, immediately clear the data instead
observeEvent(input$file33, {
rv.AgeErr$clear <- FALSE
if(!input$Ageing_error_choice){
rv.AgeErr$data <- NULL
rv.AgeErr$clear <- TRUE
reset('file33')}
}, priority = 1000)
# # if(!is.null(input$Ageing_error_choice)){
# observeEvent(input$file33, {
# if(!input$Ageing_error_choice){
# rv.AgeErr$data <- NULL
# rv.AgeErr$clear <- TRUE
# reset('file33') #}
# }, priority = 1000)
# }
#Reset index
#Read the uploaded abundance-index file (file4) unless the clear flag is set
observe({
req(input$file4)
req(!rv.Index$clear)
rv.Index$data <- fread(input$file4$datapath,check.names=FALSE,data.table=FALSE)
#L <- readLines(input$file4$datapath, n = 1)
#rv.Index$data <- read.csv(input$file4$datapath,check.names=FALSE)
#if(grepl(";", L)) {rv.Index$data <- read.csv2(input$file4$datapath,check.names=FALSE,header=FALSE)}
})
#A new upload re-enables reading
observeEvent(input$file4, {
rv.Index$clear <- FALSE
}, priority = 1000)
#Reset button: drop the data and block re-reads until a new upload
observeEvent(input$reset_index, {
rv.Index$data <- NULL
rv.Index$clear <- TRUE
reset('file4')
}, priority = 1000)
#Throw an error if fleets are not consecutively represented in all loaded data sets.
#Warn if fleet numbers are not consecutive (1..max) when pooled across all
#loaded data sets (catch, lengths, ages, indices)
observeEvent(req(any(!is.null(rv.Ct$data),!is.null(rv.Lt$data),!is.null(rv.Age$data),!is.null(rv.Index$data))),{
ct.flt<-lt.flt<-age.flt<-index.flt<-NA
#NOTE(review): every catch column is counted as a fleet — confirm the
#catch file carries no year column
if(!is.null(rv.Ct$data)){ct.flt<-c(1:(ncol(rv.Ct$data)))}
#Column 3 of the composition/index files holds the fleet number
if(!is.null(rv.Lt$data)){lt.flt<-rv.Lt$data[,3]}
if(!is.null(rv.Age$data)){age.flt<-rv.Age$data[,3]}
if(!is.null(rv.Index$data)){index.flt<-rv.Index$data[,3]}
fleets.no.negs<-unique(na.omit(c(ct.flt,lt.flt,age.flt,index.flt)))[unique(na.omit(c(ct.flt,lt.flt,age.flt,index.flt)))>0] #remove any negative fleets
if(length(fleets.no.negs)!=length(seq(1:max(fleets.no.negs))))
{
sendSweetAlert(
session = session,
title = "Model Warning",
text = "Non-consecutive fleet numbering. Check all data sets (e.g., catch, lengths, ages, indices) to make sure all fleets from 1 to the maximum fleet number are found when considered across all data sets. For instance, if you have 3 total fleets, there should not be a fleet number > 3 (e.g., 1,2,4). All fleets are not expected in each data file, just across all data files.",
type = "warning")
}
})
#######
# observeEvent(input$reset_lt, {
# rv.Lt$data <- NULL
# shinyjs::reset('file1')
# })
# # observeEvent(input$reset_lt, {
# # output$Ltplot<-renderPlot({
# # rv.Lt$data <- NULL
# # if (is.null(rv.Lt$data)) return(NULL)
# # })
# # })
# observeEvent(input$reset_age, {
# rv.Age$data <- NULL
# shinyjs::reset('file3')
# })
# observeEvent(input$reset_ct, {
# rv.Ct$data <- NULL
# shinyjs::reset('file2')
# })
#####################################################
#Clicking the estimate-parameters toggle reveals the estimation panel
onclick("est_LHparms",id="panel_SS_est")
#Initial UI state: only the data panel is visible and the user-model tab
#(target "11") is hidden
observe({
shinyjs::show("Data_panel")
hideTab(inputId = "tabs", target = "11")
#shinyjs::hide("OS_choice")
#shinyjs::hide("run_SS")
#shinyjs::hide("run_SSS")
})
#To get the ObserveEvent to work, each statement in req needs to be unique.
#This explains the workaround of ((as.numeric(input$tabs)*x)/x)<4, where x is the unique type of assessment being run
#This input allows other tabs to have different side panels.
#Switch back to data from different tabs
#Switching back to a data tab (tabs < 4): show only the data-entry panels
#and hide every model-specification, run and diagnostic panel. The *99/99
#term keeps this req() expression unique so the observer fires
#independently of the other, otherwise-identical conditions.
observeEvent(req(((as.numeric(input$tabs)*99)/99)<4), {
panels.on <- c("Data_panel", "Existing_files")
panels.off <- c(
"panel_Ct_F_LO", "panel_data_wt_lt", "panel_ct_wt_LO", "panel_SSS",
"panel_SSLO_LH", "panel_SSLO_fixed", "panel_SS_LH_fixed_est_tog",
"panel_SS_LH_fixed", "panel_SS_fixed", "panel_SS_LH_est", "panel_SS_est",
"panel_SS_stock_status", "panel_SSS_prod", "panel_SS_LO_prod",
"panel_SS_prod_fixed", "panel_SS_prod_est", "panel_selectivity",
"panel_selectivity_sss", "panel_SS_recdevs", "panel_SS_jitter",
"panel_RPs", "panel_Forecasts", "panel_Mod_dims", "panel_advanced_SS",
"panel_advanced_user_SS", "panel_advanced_SSS", "panel_SSS_reps",
"OS_choice", "Scenario_panel", "run_SSS", "run_SS", "Modeff_panel",
"Profile_panel", "Retro_panel", "Sensi_Comparison_panel",
"Ensemble_panel")
lapply(panels.on, shinyjs::show)
lapply(panels.off, shinyjs::hide)
hideTab(inputId = "tabs", target = "11")
showTab(inputId = "tabs", target = "2")
})
#Reset when all things are clicked off
#All data cleared and no user-supplied model: return the UI to its initial
#data-entry state (only the data panels visible).
observeEvent(req(((as.numeric(input$tabs)*1)/1)<4&is.null(rv.Lt$data)&is.null(rv.Ct$data)&is.null(rv.Age$data)&is.null(rv.Index$data)&any(is.null(input$user_model),!input$user_model)), {
panels.on <- c("Data_panel", "Existing_files")
panels.off <- c(
"panel_Ct_F_LO", "panel_data_wt_lt", "panel_ct_wt_LO", "panel_SSS",
"panel_SSLO_LH", "panel_SSLO_fixed", "panel_SS_LH_fixed_est_tog",
"panel_SS_LH_fixed", "panel_SS_fixed", "panel_SS_LH_est", "panel_SS_est",
"panel_SS_stock_status", "panel_SSS_prod", "panel_SS_LO_prod",
"panel_SS_prod_fixed", "panel_SS_prod_est", "panel_selectivity",
"panel_selectivity_sss", "panel_SS_recdevs", "panel_SS_jitter",
"panel_RPs", "panel_Forecasts", "panel_Mod_dims", "panel_advanced_SS",
"panel_advanced_user_SS", "panel_advanced_SSS", "panel_SSS_reps",
"OS_choice", "Scenario_panel", "run_SSS", "run_SS", "Modeff_panel",
"Profile_panel", "Retro_panel", "Sensi_Comparison_panel",
"Ensemble_panel")
lapply(panels.on, shinyjs::show)
lapply(panels.off, shinyjs::hide)
hideTab(inputId = "tabs", target = "11")
showTab(inputId = "tabs", target = "2")
})
#User chosen model
#User-supplied model chosen: show the data-weighting, jitter, reference
#point, forecast and advanced user-model panels; expose the user-model tab
#(target "11") and hide the standard data tab (target "2").
observeEvent(req(!is.null(input$user_model)&input$user_model), {
panels.on <- c(
"Data_panel", "Existing_files", "panel_data_wt_lt", "panel_SS_jitter",
"panel_RPs", "panel_Forecasts", "panel_advanced_user_SS", "OS_choice",
"Scenario_panel", "run_SS")
panels.off <- c(
"panel_Ct_F_LO", "panel_ct_wt_LO", "panel_SSS", "panel_SSLO_LH",
"panel_SSLO_fixed", "panel_SS_LH_fixed_est_tog", "panel_SS_LH_fixed",
"panel_SS_fixed", "panel_SS_LH_est", "panel_SS_est",
"panel_SS_stock_status", "panel_SSS_prod", "panel_SS_LO_prod",
"panel_SS_prod_fixed", "panel_SS_prod_est", "panel_selectivity",
"panel_selectivity_sss", "panel_SS_recdevs", "panel_Mod_dims",
"panel_SSS_reps", "panel_advanced_SS", "panel_advanced_SSS", "run_SSS",
"Modeff_panel", "Profile_panel", "Retro_panel",
"Sensi_Comparison_panel", "Ensemble_panel")
lapply(panels.on, shinyjs::show)
lapply(panels.off, shinyjs::hide)
showTab(inputId = "tabs", target = "11")
hideTab(inputId = "tabs", target = "2")
})
#SSS panels
#Catch-only data loaded: configure the UI for an SSS (Simple Stock
#Synthesis) run — show the SSS input, stock-status, productivity and
#selectivity panels and the SSS run button.
observeEvent(req(((as.numeric(input$tabs)*1)/1)<4&is.null(rv.Lt$data)&!is.null(rv.Ct$data)&is.null(rv.Age$data)&is.null(rv.Index$data)&any(is.null(input$user_model),!input$user_model)), {
panels.on <- c(
"Data_panel", "panel_SSS", "panel_SS_stock_status", "panel_SSS_prod",
"panel_selectivity_sss", "panel_RPs", "panel_Forecasts",
"panel_Mod_dims", "panel_SSS_reps", "panel_advanced_SSS", "OS_choice",
"Scenario_panel", "run_SSS")
panels.off <- c(
"Existing_files", "panel_Ct_F_LO", "panel_data_wt_lt", "panel_ct_wt_LO",
"panel_SSLO_LH", "panel_SSLO_fixed", "panel_SS_LH_fixed_est_tog",
"panel_SS_LH_fixed", "panel_SS_fixed", "panel_SS_LH_est",
"panel_SS_est", "panel_SS_LO_prod", "panel_SS_prod_fixed",
"panel_SS_prod_est", "panel_selectivity", "panel_SS_recdevs",
"panel_SS_jitter", "panel_advanced_SS", "panel_advanced_user_SS",
"run_SS", "Modeff_panel", "Profile_panel", "Retro_panel",
"Sensi_Comparison_panel", "Ensemble_panel")
lapply(panels.on, shinyjs::show)
lapply(panels.off, shinyjs::hide)
showTab(inputId = "tabs", target = "11")
hideTab(inputId = "tabs", target = "2")
})
#SS-LO panels
#Length/age compositions without catches: configure the UI for an SS-LO
#(length-only) run.
observeEvent(req(((as.numeric(input$tabs)*2)/2)<4&all(!is.null(c(rv.Lt$data,rv.Age$data)),is.null(rv.Ct$data))&any(is.null(input$user_model),!input$user_model)), {
panels.on <- c(
"Data_panel", "Existing_files", "panel_Ct_F_LO", "panel_data_wt_lt",
"panel_SSLO_LH", "panel_SSLO_fixed", "panel_SS_LO_prod",
"panel_selectivity", "panel_SS_recdevs", "panel_SS_jitter",
"panel_RPs", "panel_Forecasts", "panel_Mod_dims", "panel_advanced_SS",
"OS_choice", "Scenario_panel", "run_SS")
panels.off <- c(
"panel_SSS", "panel_SS_LH_fixed_est_tog", "panel_SS_LH_fixed",
"panel_SS_fixed", "panel_SS_LH_est", "panel_SS_est",
"panel_SS_stock_status", "panel_SSS_prod", "panel_SS_prod_fixed",
"panel_SS_prod_est", "panel_selectivity_sss", "panel_advanced_user_SS",
"panel_advanced_SSS", "panel_SSS_reps", "run_SSS", "Modeff_panel",
"Profile_panel", "Retro_panel", "Sensi_Comparison_panel",
"Ensemble_panel")
lapply(panels.on, shinyjs::show)
lapply(panels.off, shinyjs::hide)
#Catch-weighting panel only applies when more than one fleet is present
if(length(unique(rv.Lt$data[,3]))>1|length(unique(rv.Age$data[,3]))>1){shinyjs::show("panel_ct_wt_LO")}
if(length(unique(rv.Lt$data[,3]))==1|length(unique(rv.Age$data[,3]))==1){shinyjs::hide("panel_ct_wt_LO")}
hideTab(inputId = "tabs", target = "11")
showTab(inputId = "tabs", target = "2")
})
#SS-CL fixed parameters
#Catches plus compositions/indices with parameter estimation switched off:
#configure the UI for an SS-CL run with fixed life-history parameters.
observeEvent(req(((as.numeric(input$tabs)*3)/3)<4&all(any(input$est_parms==FALSE,input$est_parms2==FALSE),any(all(!is.null(rv.Lt$data),!is.null(rv.Ct$data)),all(!is.null(rv.Age$data),!is.null(rv.Ct$data)),all(!is.null(rv.Index$data),!is.null(rv.Ct$data))))&any(is.null(input$user_model),!input$user_model)), {
panels.on <- c(
"Data_panel", "Existing_files", "panel_SS_LH_fixed_est_tog",
"panel_SS_LH_fixed", "panel_SS_fixed", "panel_SS_prod_fixed",
"panel_selectivity", "panel_SS_recdevs", "panel_SS_jitter",
"panel_RPs", "panel_Forecasts", "panel_Mod_dims", "panel_advanced_SS",
"OS_choice", "Scenario_panel", "run_SS")
panels.off <- c(
"panel_Ct_F_LO", "panel_ct_wt_LO", "panel_SSS", "panel_SSLO_LH",
"panel_SSLO_fixed", "panel_SS_LH_est", "panel_SS_est",
"panel_SS_stock_status", "panel_SSS_prod", "panel_SS_LO_prod",
"panel_SS_prod_est", "panel_selectivity_sss", "panel_advanced_user_SS",
"panel_advanced_SSS", "panel_SSS_reps", "run_SSS", "Modeff_panel",
"Profile_panel", "Retro_panel", "Sensi_Comparison_panel",
"Ensemble_panel")
lapply(panels.on, shinyjs::show)
lapply(panels.off, shinyjs::hide)
#Data-weighting panel only applies when composition data are loaded
if(any(!is.null(rv.Lt$data),!is.null(rv.Age$data))){
shinyjs::show("panel_data_wt_lt")
} else {
shinyjs::hide("panel_data_wt_lt")
}
hideTab(inputId = "tabs", target = "11")
showTab(inputId = "tabs", target = "2")
})
#SS-CL with parameter estimates
observeEvent(req(((as.numeric(input$tabs)*4)/4)<4&all(input$est_parms==TRUE,any(all(!is.null(rv.Lt$data),!is.null(rv.Ct$data)),all(!is.null(rv.Age$data),!is.null(rv.Ct$data)),all(!is.null(rv.Index$data),!is.null(rv.Ct$data))))&any(is.null(input$user_model),!input$user_model)), {
  # Panels that apply when running SS-CL with estimated parameters.
  panels_on <- c(
    "Data_panel", "Existing_files", "panel_SS_LH_fixed_est_tog",
    "panel_SS_LH_est", "panel_SS_est", "panel_SS_prod_est",
    "panel_selectivity", "panel_SS_recdevs", "panel_SS_jitter",
    "panel_RPs", "panel_Forecasts", "panel_Mod_dims",
    "panel_advanced_SS", "OS_choice", "Scenario_panel", "run_SS"
  )
  # Panels belonging to other model configurations or tool tabs.
  panels_off <- c(
    "panel_Ct_F_LO", "panel_ct_wt_LO", "panel_SSS", "panel_SSLO_LH",
    "panel_SSLO_fixed", "panel_SS_LH_fixed", "panel_SS_fixed",
    "panel_SS_stock_status", "panel_SSS_prod", "panel_SS_LO_prod",
    "panel_SS_prod_fixed", "panel_selectivity_sss", "panel_advanced_user_SS",
    "panel_advanced_SSS", "panel_SSS_reps", "run_SSS", "Modeff_panel",
    "Profile_panel", "Retro_panel", "Sensi_Comparison_panel", "Ensemble_panel"
  )
  lapply(panels_on, shinyjs::show)
  lapply(panels_off, shinyjs::hide)
  # Data-weighting panel is only relevant when lengths or ages are loaded.
  if (any(!is.null(rv.Lt$data), !is.null(rv.Age$data))) {
    shinyjs::show("panel_data_wt_lt")
  } else {
    shinyjs::hide("panel_data_wt_lt")
  }
  hideTab(inputId = "tabs", target = "11")
  showTab(inputId = "tabs", target = "2")
})
#Model Efficiency
observeEvent(req((as.numeric(input$tabs)*12/12)==12), {
  # All model-setup panels are irrelevant on the Model Efficiency tab.
  setup_panels <- c(
    "Data_panel", "Existing_files", "panel_Ct_F_LO", "panel_data_wt_lt",
    "panel_ct_wt_LO", "panel_SSS", "panel_SSLO_LH", "panel_SSLO_fixed",
    "panel_SS_LH_fixed_est_tog", "panel_SS_LH_fixed", "panel_SS_fixed",
    "panel_SS_LH_est", "panel_SS_est", "panel_SS_stock_status",
    "panel_SSS_prod", "panel_SS_LO_prod", "panel_SS_prod_fixed",
    "panel_SS_prod_est", "panel_selectivity", "panel_selectivity_sss",
    "panel_SS_recdevs", "panel_SS_jitter", "panel_RPs", "panel_Forecasts",
    "panel_Mod_dims", "panel_advanced_SS", "panel_advanced_user_SS",
    "panel_advanced_SSS", "OS_choice", "Scenario_panel", "panel_SSS_reps",
    "run_SSS", "run_SS"
  )
  other_tools <- c("Profile_panel", "Retro_panel",
                   "Sensi_Comparison_panel", "Ensemble_panel")
  lapply(c(setup_panels, other_tools), shinyjs::hide)
  shinyjs::show("Modeff_panel")
  hideTab(inputId = "tabs", target = "11")
  showTab(inputId = "tabs", target = "2")
})
#Profiles
observeEvent(req((as.numeric(input$tabs)*4/4)==4), {
  # All model-setup panels are irrelevant on the Profiles tab.
  setup_panels <- c(
    "Data_panel", "Existing_files", "panel_Ct_F_LO", "panel_data_wt_lt",
    "panel_ct_wt_LO", "panel_SSS", "panel_SSLO_LH", "panel_SSLO_fixed",
    "panel_SS_LH_fixed_est_tog", "panel_SS_LH_fixed", "panel_SS_fixed",
    "panel_SS_LH_est", "panel_SS_est", "panel_SS_stock_status",
    "panel_SSS_prod", "panel_SS_LO_prod", "panel_SS_prod_fixed",
    "panel_SS_prod_est", "panel_selectivity", "panel_selectivity_sss",
    "panel_SS_recdevs", "panel_SS_jitter", "panel_RPs", "panel_Forecasts",
    "panel_Mod_dims", "panel_advanced_SS", "panel_advanced_user_SS",
    "panel_advanced_SSS", "OS_choice", "Scenario_panel", "panel_SSS_reps",
    "run_SSS", "run_SS"
  )
  other_tools <- c("Modeff_panel", "Retro_panel",
                   "Sensi_Comparison_panel", "Ensemble_panel")
  lapply(c(setup_panels, other_tools), shinyjs::hide)
  shinyjs::show("Profile_panel")
  hideTab(inputId = "tabs", target = "11")
  showTab(inputId = "tabs", target = "2")
})
#Retrospectives
observeEvent(req((as.numeric(input$tabs)*5/5)==5), {
  # All model-setup panels are irrelevant on the Retrospectives tab.
  setup_panels <- c(
    "Data_panel", "Existing_files", "panel_Ct_F_LO", "panel_data_wt_lt",
    "panel_ct_wt_LO", "panel_SSS", "panel_SSLO_LH", "panel_SSLO_fixed",
    "panel_SS_LH_fixed_est_tog", "panel_SS_LH_fixed", "panel_SS_fixed",
    "panel_SS_LH_est", "panel_SS_est", "panel_SS_stock_status",
    "panel_SSS_prod", "panel_SS_LO_prod", "panel_SS_prod_fixed",
    "panel_SS_prod_est", "panel_selectivity", "panel_selectivity_sss",
    "panel_SS_recdevs", "panel_SS_jitter", "panel_RPs", "panel_Forecasts",
    "panel_Mod_dims", "panel_advanced_SS", "panel_advanced_user_SS",
    "panel_advanced_SSS", "OS_choice", "Scenario_panel", "panel_SSS_reps",
    "run_SSS", "run_SS"
  )
  other_tools <- c("Modeff_panel", "Profile_panel",
                   "Sensi_Comparison_panel", "Ensemble_panel")
  lapply(c(setup_panels, other_tools), shinyjs::hide)
  shinyjs::show("Retro_panel")
  hideTab(inputId = "tabs", target = "11")
  showTab(inputId = "tabs", target = "2")
})
#Sensitivities
observeEvent(req((as.numeric(input$tabs)*6/6)==6), {
  # All model-setup panels are irrelevant on the Sensitivities tab.
  setup_panels <- c(
    "Data_panel", "Existing_files", "panel_Ct_F_LO", "panel_data_wt_lt",
    "panel_ct_wt_LO", "panel_SSS", "panel_SSLO_LH", "panel_SSLO_fixed",
    "panel_SS_LH_fixed_est_tog", "panel_SS_LH_fixed", "panel_SS_fixed",
    "panel_SS_LH_est", "panel_SS_est", "panel_SS_stock_status",
    "panel_SSS_prod", "panel_SS_LO_prod", "panel_SS_prod_fixed",
    "panel_SS_prod_est", "panel_selectivity", "panel_selectivity_sss",
    "panel_SS_recdevs", "panel_SS_jitter", "panel_RPs", "panel_Forecasts",
    "panel_Mod_dims", "panel_advanced_SS", "panel_advanced_user_SS",
    "panel_advanced_SSS", "OS_choice", "Scenario_panel", "panel_SSS_reps",
    "run_SSS", "run_SS"
  )
  other_tools <- c("Modeff_panel", "Profile_panel",
                   "Retro_panel", "Ensemble_panel")
  lapply(c(setup_panels, other_tools), shinyjs::hide)
  shinyjs::show("Sensi_Comparison_panel")
  hideTab(inputId = "tabs", target = "11")
  showTab(inputId = "tabs", target = "2")
})
#Ensembles
observeEvent(req((as.numeric(input$tabs)*7/7)==7), {
  # All model-setup panels are irrelevant on the Ensembles tab.
  setup_panels <- c(
    "Data_panel", "Existing_files", "panel_Ct_F_LO", "panel_data_wt_lt",
    "panel_ct_wt_LO", "panel_SSS", "panel_SSLO_LH", "panel_SSLO_fixed",
    "panel_SS_LH_fixed_est_tog", "panel_SS_LH_fixed", "panel_SS_fixed",
    "panel_SS_LH_est", "panel_SS_est", "panel_SS_stock_status",
    "panel_SSS_prod", "panel_SS_LO_prod", "panel_SS_prod_fixed",
    "panel_SS_prod_est", "panel_selectivity", "panel_selectivity_sss",
    "panel_SS_recdevs", "panel_SS_jitter", "panel_RPs", "panel_Forecasts",
    "panel_Mod_dims", "panel_advanced_SS", "panel_advanced_user_SS",
    "panel_advanced_SSS", "OS_choice", "Scenario_panel", "panel_SSS_reps",
    "run_SSS", "run_SS"
  )
  other_tools <- c("Modeff_panel", "Profile_panel",
                   "Retro_panel", "Sensi_Comparison_panel")
  lapply(c(setup_panels, other_tools), shinyjs::hide)
  shinyjs::show("Ensemble_panel")
  hideTab(inputId = "tabs", target = "11")
  showTab(inputId = "tabs", target = "2")
})
########################################
#############################
######### UI INPUTS #########
#############################
# User activated pop-up parameter values ---------------
#Model dimensions
output$Model_dims1 <- renderUI({
  #Start/end year inputs derived from whichever data files are loaded.
  inFile1 = rv.Lt$data   #length-composition data
  inFile2 = rv.Ct$data   #catch data
  inFile3 = rv.Age$data  #age-composition data
  #No file inputs: nothing to render
  if (is.null(inFile1) & is.null(inFile2) & is.null(inFile3)) return(NULL)
  #Pool the year column (column 1) of every file that is present.
  #Guarding each extraction fixes the original NULL[,1] error
  #("incorrect number of dimensions") that occurred whenever only a
  #subset of the files was uploaded (e.g. lengths + catch but no ages).
  yrs <- c(if (!is.null(inFile1)) inFile1[, 1],
           if (!is.null(inFile2)) inFile2[, 1],
           if (!is.null(inFile3)) inFile3[, 1])
  styr.in <- min(yrs)
  endyr.in <- max(yrs)
  fluidRow(column(width=4, numericInput("styr", "Starting year",
                                        value=styr.in, min=1, max=10000, step=1)),
           column(width=4, numericInput("endyr", "Ending year",
                                        value=endyr.in, min=1, max=10000, step=1)))
})
output$Model_dims2 <- renderUI({
  #Start/end year inputs derived from the catch file alone.
  catch_dat <- rv.Ct$data
  if (is.null(catch_dat)) return(NULL)
  yr_range <- range(catch_dat[, 1])  #c(min year, max year)
  fluidRow(column(width=4, numericInput("styr", "Starting year",
                                        value=yr_range[1], min=1, max=10000, step=1)),
           column(width=4, numericInput("endyr", "Ending year",
                                        value=yr_range[2], min=1, max=10000, step=1)))
})
# output$Female_parms_inputs_label <- reactive({
# if(!is.null(input$file1))
# {
# (output$Female_parms_inputs_label<- renderUI({
# fluidRow(column(width=6,numericInput("Nages","Max. age", value=NA,min=1, max=1000, step=1)),
# column(width=6,numericInput("M_f", "Natural mortality", value=NA,min=0, max=10000, step=0.01)))
# }))
# }
# })
#Male life history parameters
#These outputs only render when the user opts in to male-specific values.
#isTRUE() replaces the bare if(input$male_parms): a bare if() errors with
#"argument is of length zero" while the checkbox input is still NULL
#during app start-up; isTRUE() simply renders nothing in that case.
output$Male_parms_inputs_label <- renderUI({
  if(isTRUE(input$male_parms)){
    h5(em("Male"))
  }
})
#Natural mortality and asymptotic length
output$Male_parms_inputs1 <- renderUI({
  if(isTRUE(input$male_parms)){
    fluidRow(column(width=6, numericInput("M_m", "Natural mortality",
                                          value=NA, min=0, max=10000, step=0.01)),
             column(width=6, numericInput("Linf_m", "Asymptotic size (Linf)",
                                          value=NA, min=0, max=10000, step=0.01)))
  }
})
#von Bertalanffy growth coefficient and age at length zero
output$Male_parms_inputs2 <- renderUI({
  if(isTRUE(input$male_parms)){
    fluidRow(column(width=6, numericInput("k_m", "Growth coefficient k",
                                          value=NA, min=0, max=10000, step=0.01)),
             column(width=6, numericInput("t0_m", "Age at length 0 (t0)",
                                          value=NA, min=0, max=10000, step=0.01)))
  }
})
#CV at length, entered as a comma-separated pair (young, old)
output$Male_parms_inputs3 <- renderUI({
  if(isTRUE(input$male_parms)){
    fluidRow(column(width=6, textInput("CV_lt_m", "CV at length (young then old)", value="0.1,0.1")))
  }
})
#Weight-length relationship parameters
output$Male_parms_inputs4 <- renderUI({
  if(isTRUE(input$male_parms)){
    fluidRow(column(width=6, numericInput("WLa_m", "Weight-length alpha",
                                          value=0.00001, min=0, max=10000, step=0.000000001)),
             column(width=6, numericInput("WLb_m", "Weight-length beta",
                                          value=3, min=0, max=10000, step=0.01)))
  }
})
#Male life-history inputs for the fixed-parameter configuration.
#isTRUE() guards against the NULL that input$male_parms_fix holds before
#the checkbox initializes (a bare if(NULL) errors with
#"argument is of length zero").
output$Male_parms_inputs_label_fix <- renderUI({
  if(isTRUE(input$male_parms_fix)){
    h5(em("Male"))
  }
})
#Natural mortality and asymptotic length
output$Male_parms_inputs1_fix <- renderUI({
  if(isTRUE(input$male_parms_fix)){
    fluidRow(column(width=6, numericInput("M_m_fix", "Natural mortality",
                                          value=NA, min=0, max=10000, step=0.01)),
             column(width=6, numericInput("Linf_m_fix", "Asymptotic size (Linf)",
                                          value=NA, min=0, max=10000, step=0.01)))
  }
})
#von Bertalanffy growth coefficient and age at length zero
output$Male_parms_inputs2_fix <- renderUI({
  if(isTRUE(input$male_parms_fix)){
    fluidRow(column(width=6, numericInput("k_m_fix", "Growth coefficient k",
                                          value=NA, min=0, max=10000, step=0.01)),
             column(width=6, numericInput("t0_m_fix", "Age at length 0 (t0)",
                                          value=NA, min=0, max=10000, step=0.01)))
  }
})
#CV at length, entered as a comma-separated pair (young, old)
output$Male_parms_inputs3_fix <- renderUI({
  if(isTRUE(input$male_parms_fix)){
    fluidRow(column(width=6, textInput("CV_lt_m_fix", "CV at length (young then old)", value="0.1,0.1")))
  }
})
#Weight-length relationship parameters
output$Male_parms_inputs4_fix <- renderUI({
  if(isTRUE(input$male_parms_fix)){
    fluidRow(column(width=6, numericInput("WLa_m_fix", "Weight-Length alpha",
                                          value=0.00001, min=0, max=10000, step=0.000000001)),
             column(width=6, numericInput("WLb_m_fix", "Weight-length beta",
                                          value=3, min=0, max=10000, step=0.01)))
  }
})
#Male life-history inputs for the estimated-parameter configuration.
#Each prior is entered through a dropdownButton (prior type, mean, SD,
#estimation phase; phase < 0 keeps the parameter fixed in SS).
#isTRUE() guards against the NULL that input$male_parms_est holds before
#the checkbox initializes (a bare if(NULL) errors with
#"argument is of length zero").
output$Male_parms_inputs_label_est <- renderUI({
  if(isTRUE(input$male_parms_est)){
    h4(em("Male"))
  }
})
#Natural mortality prior
output$Male_parms_inputs_M_est <- renderUI({
  if(isTRUE(input$male_parms_est)){
    dropdownButton(
      selectInput("M_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")),
      numericInput("M_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001),
      numericInput("M_m_SD", "SD", value=0,min=0, max=10000, step=0.001),
      numericInput("M_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("skull-crossbones"), width = "300px",label="Natural mortality"
    )
  }
})
#Layout spacers: render a line break only when the male section is shown
output$Male_parms_inputs_space1 <- renderUI({
  if(isTRUE(input$male_parms_est)){
    br()
  }
})
output$Male_parms_inputs_space2 <- renderUI({
  if(isTRUE(input$male_parms_est)){
    br()
  }
})
output$Male_parms_inputs_space3 <- renderUI({
  if(isTRUE(input$male_parms_est)){
    br()
  }
})
output$Male_parms_inputs_space4 <- renderUI({
  if(isTRUE(input$male_parms_est)){
    br()
  }
})
output$Male_parms_inputs_space5 <- renderUI({
  if(isTRUE(input$male_parms_est)){
    br()
  }
})
output$Male_parms_inputs_Growth_label <- renderUI({
  if(isTRUE(input$male_parms_est)){
    h5(strong("Growth"))
  }
})
#Asymptotic length (Linf) prior
output$Male_parms_inputs_Linf_est <- renderUI({
  if(isTRUE(input$male_parms_est)){
    dropdownButton(
      selectInput("Linf_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")),
      numericInput("Linf_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001),
      numericInput("Linf_m_SD", "SD", value=0,min=0, max=10000, step=0.001),
      numericInput("Linf_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("infinity"), width = "300px",label="Linf: Asymptotic size"
    )
  }
})
#Growth coefficient (k) prior
output$Male_parms_inputs_k_est <- renderUI({
  if(isTRUE(input$male_parms_est)){
    dropdownButton(
      selectInput("k_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")),
      numericInput("k_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001),
      numericInput("k_m_SD", "SD", value=0,min=0, max=10000, step=0.001),
      numericInput("k_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("ruler-horizontal"), width = "300px",label="k: VB growth coefficient"
    )
  }
})
#Age at length zero (t0) prior; mean may be negative
output$Male_parms_inputs_t0_est <- renderUI({
  if(isTRUE(input$male_parms_est)){
    dropdownButton(
      selectInput("t0_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")),
      numericInput("t0_m_mean", "Mean", value=0,min=-100, max=100, step=0.001),
      numericInput("t0_m_SD", "SD", value=0,min=0, max=100, step=0.001),
      numericInput("t0_m_phase", "Phase", value=-1,min=-999, max=100, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("baby-carriage"), width = "300px",label="t0: Age at size 0"
    )
  }
})
#CV at length for young fish
output$Male_parms_inputs_CV_est_young <- renderUI({
  if(isTRUE(input$male_parms_est)){
    dropdownButton(
      selectInput("CV_lt_m_young_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")),
      numericInput("CV_lt_m_young_mean", "Mean", value=0.1,min=0, max=10000, step=0.001),
      numericInput("CV_lt_m_young_SD", "SD", value=0,min=0, max=10000, step=0.001),
      numericInput("CV_lt_m_young_phase", "Phase", value=-1,min=-999, max=10, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("dice"), width = "300px",label="CV at length (young)"
    )
  }
})
#CV at length for old fish
output$Male_parms_inputs_CV_est_old <- renderUI({
  if(isTRUE(input$male_parms_est)){
    dropdownButton(
      selectInput("CV_lt_m_old_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")),
      numericInput("CV_lt_m_old_mean", "Mean", value=0.1,min=0, max=10000, step=0.001),
      numericInput("CV_lt_m_old_SD", "SD", value=0,min=0, max=10000, step=0.001),
      numericInput("CV_lt_m_old_phase", "Phase", value=-1,min=-999, max=10, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("dice"), width = "300px",label="CV at length (old)"
    )
  }
})
#Weight-length relationship parameters (always fixed, no prior)
output$Male_parms_inputs_WL_est <- renderUI({
  if(isTRUE(input$male_parms_est)){
    fluidRow(column(width=6, numericInput("WLa_m_est", "Weight-length alpha",
                                          value=0.00001, min=0, max=10000, step=0.000000001)),
             column(width=6, numericInput("WLb_m_est", "Weight-length beta",
                                          value=3, min=0, max=10000, step=0.01)))
  }
})
#h5(strong("M")),
# fluidRow(column(width=4,style='padding:1px;',align="center", selectInput("M_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal"))),
# column(width=3,style='padding:2px;',align="center",numericInput("M_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',align="center",numericInput("M_m_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("M_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001)))
# }
# })
# output$Male_parms_inputs_Linf_est <- renderUI({
# if(input$male_parms_est){
# #h5(strong("Linf")),
# fluidRow(column(width=4,style='padding:1px;',align="center",selectInput("Linf_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal"))),
# column(width=3,style='padding:2px;',align="center",numericInput("Linf_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',align="center",numericInput("Linf_m_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("Linf_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001)))
# }
# })
# output$Male_parms_inputs_k_est <- renderUI({
# if(input$male_parms_est){
# #h5(strong("k")),
# fluidRow(column(width=4,style='padding:2px;',selectInput("k_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal"))),
# column(width=3,style='padding:2px;',numericInput("k_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',numericInput("k_m_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("k_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001)))
# }
# })
# output$Male_parms_inputs_t0_est <- renderUI({
# if(input$male_parms_est){
# #h5(strong("t0")),
# fluidRow(column(width=4,style='padding:2px;',selectInput("t0_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal"))),
# column(width=3,style='padding:2px;',numericInput("t0_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',numericInput("t0_m_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("t0_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001)))
# }
# })
# output$Male_parms_inputs_CV_est <- renderUI({
# if(input$male_parms_est){
# #h5(strong("Length CV")),
# fluidRow(column(width=4,style='padding:2px;',selectInput("CV_lt_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal"))),
# column(width=3,style='padding:2px;',numericInput("CV_lt_m_mean", "Mean", value=0.1,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',numericInput("CV_lt_m_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("CV_lt_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001)))
# }
# })
#Male life history parameters
#Male life-history inputs for the SSS configuration (priors only: no
#estimation phases, since SSS samples rather than estimates).
#isTRUE() guards against the NULL that input$male_parms_SSS holds before
#the checkbox initializes (a bare if(NULL) errors with
#"argument is of length zero").
output$Male_parms_inputs_label_SSS<- renderUI({
  if(isTRUE(input$male_parms_SSS)){
    h5(em("Male"))
  }
})
#Natural mortality prior (SD default 0.44)
output$Male_parms_inputs_M_SSS<- renderUI({
  if(isTRUE(input$male_parms_SSS)){
    dropdownButton(
      selectInput("M_m_prior_sss","Prior type",c("lognormal","normal","uniform","no prior")),
      numericInput("M_m_mean_sss", "Mean", value=NA,min=0, max=10000, step=0.001),
      numericInput("M_m_SD_sss", "SD", value=0.44,min=0, max=10000, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("skull-crossbones"), width = "300px",label="Natural mortality"
    )
  }
})
#Layout spacers: render a line break only when the male section is shown
output$Male_parms_inputs_space1_SSS <- renderUI({
  if(isTRUE(input$male_parms_SSS)){
    br()
  }
})
output$Male_parms_inputs_space2_SSS <- renderUI({
  if(isTRUE(input$male_parms_SSS)){
    br()
  }
})
output$Male_parms_inputs_space3_SSS <- renderUI({
  if(isTRUE(input$male_parms_SSS)){
    br()
  }
})
output$Male_parms_inputs_space4_SSS <- renderUI({
  if(isTRUE(input$male_parms_SSS)){
    br()
  }
})
output$Male_parms_inputs_space5_SSS <- renderUI({
  if(isTRUE(input$male_parms_SSS)){
    br()
  }
})
output$Male_parms_inputs_Growth_label_SSS <- renderUI({
  if(isTRUE(input$male_parms_SSS)){
    h5(strong("Growth"))
  }
})
#Asymptotic length (Linf) prior
output$Male_parms_inputs_Linf_SSS <- renderUI({
  if(isTRUE(input$male_parms_SSS)){
    dropdownButton(
      selectInput("Linf_m_prior_sss","Prior type",c("no prior","normal")),
      numericInput("Linf_m_mean_sss", "Mean", value=NA,min=0, max=10000, step=0.001),
      numericInput("Linf_m_SD_sss", "SD", value=0,min=0, max=10000, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("infinity"), width = "300px",label="Linf: Asymptotic size"
    )
  }
})
#Growth coefficient (k) prior
output$Male_parms_inputs_k_SSS <- renderUI({
  if(isTRUE(input$male_parms_SSS)){
    dropdownButton(
      selectInput("k_m_prior_sss","Prior type",c("no prior","normal")),
      numericInput("k_m_mean_sss", "Mean", value=NA,min=0, max=10000, step=0.001),
      numericInput("k_m_SD_sss", "SD", value=0,min=0, max=10000, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("ruler-horizontal"), width = "300px",label="k: VB growth coefficient"
    )
  }
})
#Age at length zero (t0) prior; mean may be negative
output$Male_parms_inputs_t0_SSS <- renderUI({
  if(isTRUE(input$male_parms_SSS)){
    dropdownButton(
      selectInput("t0_m_prior_sss","Prior type",c("no prior","normal")),
      numericInput("t0_m_mean_sss", "Mean", value=0,min=-100, max=100, step=0.001),
      numericInput("t0_m_SD_sss", "SD", value=0,min=0, max=1000, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("baby-carriage"), width = "300px",label="t0: Age at size 0"
    )
  }
})
#CV at length for young fish (fixed value; no prior options)
output$Male_parms_inputs_CV_young_SSS <- renderUI({
  if(isTRUE(input$male_parms_SSS)){
    dropdownButton(
      selectInput("CV_lt_m_young_prior_sss","Prior type",c("no prior")),
      numericInput("CV_lt_m_young_mean_sss", "Mean", value=0.1,min=0, max=10000, step=0.001),
      numericInput("CV_lt_m_young_SD_sss", "SD", value=0,min=0, max=10000, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("dice"), width = "300px",label="CV at length"
    )
  }
})
#CV at length for old fish (fixed value; no prior options)
output$Male_parms_inputs_CV_old_SSS <- renderUI({
  if(isTRUE(input$male_parms_SSS)){
    dropdownButton(
      selectInput("CV_lt_m_old_prior_sss","Prior type",c("no prior")),
      numericInput("CV_lt_m_old_mean_sss", "Mean", value=0.1,min=0, max=10000, step=0.001),
      numericInput("CV_lt_m_old_SD_sss", "SD", value=0,min=0, max=10000, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("dice"), width = "300px",label="CV at length"
    )
  }
})
#Weight-length relationship parameters
output$Male_parms_inputs_WL_SSS<- renderUI({
  if(isTRUE(input$male_parms_SSS)){
    fluidRow(column(width=6,numericInput("WLa_m_sss", "Weight-Length alpha",
                                         value=0.00001,min=0, max=10000, step=0.000000001)),
             column(width=6,numericInput("WLb_m_sss","Weight-length beta",
                                         value=3,min=0, max=10000, step=0.01)))
  }
})
#Selectivity parameters
#Selectivity inputs for the SS configuration (logistic always; extra
#inputs appear when a dome-shaped curve is selected).
output$Sel_parms1 <- renderUI({
  fluidRow(column(width=8, textInput("Sel50", "Length at 50% Selectivity",value="")),
           column(width=4, textInput("Sel50_phase", "Est. phase", value="")))
})
#Typo fix below: "Selectvity" -> "Selectivity" in the user-facing label.
output$Sel_parms2<- renderUI({
  fluidRow(column(width=8, textInput("Selpeak", "Length at Peak Selectivity", value="")),
           column(width=4, textInput("Selpeak_phase", "Est. phase", value="")))
})
#isTRUE() guards the comparisons below: while input$Sel_choice is still
#NULL at start-up, NULL == "Dome-shaped" yields logical(0), which a bare
#if() rejects with "argument is of length zero".
output$Sel_parms3 <- renderUI({
  if(isTRUE(input$Sel_choice=="Dome-shaped")){
    fluidRow(column(width=8, textInput("PeakDesc", "Length at 1st declining selectivity",value="10000")),
             column(width=4, textInput("PeakDesc_phase", "Est. phase",value="")))
  }
})
output$Sel_parms4 <- renderUI({
  if(isTRUE(input$Sel_choice=="Dome-shaped")){
    fluidRow(column(width=8, textInput("LtPeakFinal", "Width of declining selectivity",value="0.0001")),
             column(width=4, textInput("LtPeakFinal_phase", "Est. phase",value="")))
  }
})
output$Sel_parms5 <- renderUI({
  if(isTRUE(input$Sel_choice=="Dome-shaped")){
    fluidRow(column(width=8, textInput("FinalSel", "Selectivity at max bin size",value="0.99999")),
             column(width=4, textInput("FinalSel_phase", "Est. phase",value="")))
  }
})
#Selectivity inputs for the SSS configuration (no estimation phases).
#Typo fix below: "Selectvity" -> "Selectivity" in the user-facing label.
output$Sel_parms1_sss <- renderUI({
  fluidRow(column(width=6, textInput("Sel50_sss", "Length at 50% Selectivity",value="")),
           column(width=6, textInput("Selpeak_sss", "Length at Peak Selectivity", value="")))
})
output$Sel_parms2_sss <- renderUI({
  if(isTRUE(input$Sel_choice_sss=="Dome-shaped")){
    fluidRow(column(width=6, textInput("PeakDesc_sss", "Length at 1st declining selectivity",value="10000")),
             column(width=6, textInput("LtPeakFinal_sss", "Width of declining selectivity",value="0.0001")))
  }
})
output$Sel_parms3_sss <- renderUI({
  if(isTRUE(input$Sel_choice_sss=="Dome-shaped")){
    fluidRow(column(width=8, textInput("FinalSel_sss", "Selectivity at max bin size",value="0.99999")))
  }
})
#Recruitment parameter inputs
#isTRUE() guards each toggle: input$rec_choice / input$biasC_choice are
#NULL before their checkboxes initialize, and a bare if(NULL) errors with
#"argument is of length zero".
output$Rec_options1 <- renderUI({
  if(isTRUE(input$rec_choice)){
    #Typo fix in label: "varaibility" -> "variability"
    fluidRow(column(width=6, numericInput("sigmaR", "Rec. variability (sR)",
                                          value=0.5, min=0, max=10, step=0.01)))
  }
})
#Recruitment-deviation year range; defaults to the model year range
output$Rec_options2 <- renderUI({
  if(isTRUE(input$rec_choice)){
    fluidRow(column(width=6, numericInput("Rdev_startyr", "Rec. devs. start year",
                                          value=input$styr, min=1, max=10000, step=1)),
             column(width=6, numericInput("Rdev_endyr", "Rec. devs. end year",
                                          value=input$endyr, min=1, max=10000, step=1)))
  }
})
#Bias-adjustment ramp years (no-bias boundaries)
output$Rec_options3 <- renderUI({
  if(isTRUE(input$biasC_choice)){
    fluidRow(column(width=6, numericInput("NobiasC_early", "Early last year",
                                          value=input$styr, min=1, max=10000, step=1)),
             column(width=6, numericInput("NobiasC_recent", "1st recent year",
                                          value=input$endyr, min=1, max=10000, step=1)))
  }
})
#Bias-adjustment full-correction years
output$Rec_options4 <- renderUI({
  if(isTRUE(input$biasC_choice)){
    fluidRow(column(width=6, numericInput("BiasC_startyr", "Start year",
                                          value=input$styr, min=1, max=10000, step=1)),
             column(width=6, numericInput("BiasC_endyr", "End year",
                                          value=input$endyr, min=1, max=10000, step=1)))
  }
})
output$Rec_options5 <- renderUI({
  if(isTRUE(input$biasC_choice)){
    fluidRow(column(width=6, numericInput("BiasC","Maximum bias adjustment", value=1,min=0, max=1, step=0.001)))
  }
})
#SS recruitment-deviation option code
output$Rec_options6 <- renderUI({
  if(isTRUE(input$rec_choice)){
    fluidRow(column(width=6, selectInput("RecDevChoice","Recruit deviation option",c("1: Devs sum to zero","2: Simple deviations","3: deviation vector","4: option 3 plus penalties"),selected="1: Devs sum to zero")))
  }
})
#Jitter value
output$Jitter_value <- renderUI({
  #isTRUE() avoids "argument is of length zero" while input$jitter_choice
  #is still NULL at start-up.
  if(isTRUE(input$jitter_choice)){
    fluidRow(column(width=6, numericInput("jitter_fraction", "Jitter value",
                                          value=0.01, min=0, max=10, step=0.001)),
             #NOTE(review): Njitter defaults to 0 but min=1, so the default
             #lies outside the declared range — confirm intended default.
             column(width=6, numericInput("Njitter", "# of jitters",
                                          value=0, min=1, max=10000, step=1)))
  }
})
#Choose reference points
#isTRUE() guards input$RP_choices, which is NULL before the toggle
#initializes (a bare if(NULL) errors with "argument is of length zero").
output$RP_selection1<- renderUI({
  if(isTRUE(input$RP_choices)){
    fluidRow(column(width=6, numericInput("SPR_target", "SPR target",
                                          value=0.5, min=0, max=1, step=0.001)),
             column(width=6, numericInput("B_target", "Biomass target",
                                          value=0.4, min=0, max=1, step=0.001)))
  }
})
#Harvest control rule: rule type plus upper/lower ratio break points
output$RP_selection2<- renderUI({
  if(isTRUE(input$RP_choices)){
    fluidRow(column(width=6,selectInput("CR_Ct_F","Control rule type",
                                        c("1: Catch fxn of SSB, buffer on F",
                                          "2: F fxn of SSB, buffer on F",
                                          "3: Catch fxn of SSB, buffer on catch",
                                          "4: F fxn of SSB, buffer on catch"))),
             column(width=3, numericInput("slope_hi", "Upper ratio value",
                                          value=0.4, min=0, max=1, step=0.001)),
             column(width=3, numericInput("slope_low", "Lower ratio value",
                                          value=0.1, min=0, max=1, step=0.001)))
  }
})
# Forecast settings (number of projection years and control-rule buffer),
# rendered only when forecasting is enabled.
output$Forecasts <- renderUI({
  if (!input$Forecast_choice) {
    return(NULL)
  }
  fluidRow(
    column(width = 6,
           numericInput("forecast_num", "# of forecast years",
                        value = 2, min = 1, max = 1000, step = 1)),
    column(width = 6,
           textInput("forecast_buffer", "Control rule buffer", value = "1"))
  )
})
# Advanced SS run options. The *_user variants render the same control with
# the SAME inputId for the user-supplied-model tab — presumably only one copy
# is on screen at a time (duplicate Shiny ids are otherwise invalid); TODO
# confirm the tabs are mutually exclusive.
output$AdvancedSS_nohess<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "no_hess", label = "Turn off Hessian (speeds up runs, but no variance estimation)",
    shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_nohess_user<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "no_hess", label = "Turn off Hessian (speeds up runs, but no variance estimation)",
    shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_addcomms<- renderUI({
  # Checkbox that reveals the free-text box for extra SS run commands.
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "add_comms",
    label = "Add additional SS run commands",
    shape = "round", outline = TRUE, status = "info")))
})
# Free-text input for extra SS run commands; appears once the "add_comms"
# checkbox has been rendered and is ticked.
output$AdvancedSS_addcomms_comms <- renderUI({
  if (is.null(input$add_comms)) {
    return(NULL)
  }
  if (!input$add_comms) {
    return(NULL)
  }
  fluidRow(
    column(width = 12,
           textInput("add_comms_in", "Enter additional run commands", value = ""))
  )
})
output$AdvancedSS_addcomms_user<- renderUI({
  # User-model-tab copy of the "add additional SS commands" checkbox.
  # NOTE(review): reuses inputId "add_comms" (same as AdvancedSS_addcomms),
  # matching the shared-id convention of the other *_user controls here.
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "add_comms",
    label = "Add additional SS run commands",
    shape = "round", outline = TRUE, status = "info")))
})
# Free-text input for extra SS run commands on the user-model tab.
# Bug fix: the condition previously tested input$add_comms_user, but the
# companion checkbox (AdvancedSS_addcomms_user) registers inputId
# "add_comms" — the shared id used by every other *_user control pair in
# this file — so "add_comms_user" never exists and this UI could never be
# shown. Test the id that is actually created.
output$AdvancedSS_addcomms_comms_user <- renderUI({
  if(!is.null(input$add_comms)){
    if(input$add_comms){
      fluidRow(column(width=12, textInput("add_comms_in", "Enter additional run commands", value="")))
    }
  }
})
output$AdvancedSS_noplots<- renderUI({
  # Checkbox to turn off post-run plots (main tab).
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "no_plots_tables", label = "Turn off plots",
    shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_noplots_user<- renderUI({
  # Same control for the user-model tab (shares inputId "no_plots_tables").
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "no_plots_tables", label = "Turn off plots",
    shape = "round", outline = TRUE, status = "info")))
  # }
})
# Checkboxes suppressing the executive-summary tables (main and user-model
# tabs; both share inputId "no_tables").
# Typo fix: user-facing label read "exectutive" -> "executive".
output$AdvancedSS_noestabs<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "no_tables", label = "No executive summary tables",
    shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_noestabs_user<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "no_tables", label = "No executive summary tables",
    shape = "round", outline = TRUE, status = "info")))
  # }
})
# Run of advanced SS option checkboxes. Each base/_user pair reuses one
# inputId (presumably only one tab renders at a time — TODO confirm). The
# commented-out if(input$advance_ss_click) wrappers are an earlier gating
# mechanism, retained as-is.
output$AdvancedSS_par<- renderUI({
  # Start from the .par file of a previous run instead of init values.
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "use_par", label = "Use par file (i.e., parameter file from previous run)?",
    shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_par_user<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "use_par", label = "Use par file (i.e., parameter file from previous run)?",
    shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_phase0<- renderUI({
  # Run with all estimation phases off (phase 0 — no parameters estimated).
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "use_phase0", label = "Turn off estimation of all parameters (phase = 0)?",
    shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_phase0_user<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "use_phase0", label = "Turn off estimation of all parameters (phase = 0)?",
    shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_datanew<- renderUI({
  # Re-run from the SS-echoed input files (*.ss_new) rather than originals.
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "use_datanew", label = "Use the data_echo.ss_new file?",
    shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_datanew_user<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "use_datanew", label = "Use the data_echo.ss_new file?",
    shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_controlnew<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "use_controlnew", label = "Use the control.ss_new file?",
    shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_controlnew_user<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "use_controlnew", label = "Use the control.ss_new file?",
    shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_forecastnew<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "use_forecastnew", label = "Use the forecast.ss_new file?",
    shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_forecastnew_user<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "use_forecastnew", label = "Use the forecast.ss_new file?",
    shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_GT1<- renderUI({
  # Growth-type (platoon) count toggles; note the SS-tab default is 5 while
  # the SSS-tab default is 1.
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "GT1", label = "Use only one growth type (default is 5)",
    shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_GT5_SSS<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "GT5", label = "Use 5 growth types (default is 1)",
    shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_Sex3<- renderUI({
  # Keep sex ratio information in length comps (SS sex option 3).
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "Sex3", label = "Retain sex ratio in length compositions (Sex option = 3)",
    shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_Indexvar<- renderUI({
  # Estimate added variance for each abundance index.
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "Indexvar", label = "Estimate additional variance on each abundance index?",
    shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_ageerror<- renderUI({
  # Toggle for uploading custom ageing-error matrices.
  fluidRow(column(width=12, prettyCheckbox(
    inputId = "Ageing_error_choice", label = "Add custom ageing error matrices?",
    shape = "round", outline = TRUE, status = "info")))
})
output$AdvancedSS_ageerror_in <- renderUI({
  # File chooser for the ageing-error CSV; shown only after the checkbox
  # above has been rendered (non-NULL) and is ticked.
  if(!is.null(input$Ageing_error_choice)){
    if(input$Ageing_error_choice){
      #h4(strong("Choose data file")),
      fluidRow(column(width=12,fileInput('file33', 'Ageing error file',
        accept = c(
          'text/csv',
          'text/comma-separated-values',
          'text/tab-separated-values',
          'text/plain',
          '.csv'
        )
      )))
    }
  }
})
output$AdvancedSS_Ctunits<- renderUI({
  # Toggle for per-fleet catch units (SS codes: 1 = biomass, 2 = numbers).
  fluidRow(column(width=12, prettyCheckbox(
    inputId = "Ct_units_choice", label = "Specify catch units (1=biomass (default); 2=numbers) for each fleet?",
    shape = "round", outline = TRUE, status = "info")))
})
output$AdvancedSS_Ctunitsfleets <- renderUI({
  # Free-text units entry; shown once the checkbox exists and is ticked.
  if(!is.null(input$Ct_units_choice)){
    if(input$Ct_units_choice){
      fluidRow(column(width=12, textInput("fleet_ct_units", "Enter catch units for each fleet", value="")))
    }
  }
})
output$AdvancedSS_Ctunits_SSS<- renderUI({
  # SSS-tab copy of the same pair, with its own input ids.
  fluidRow(column(width=12, prettyCheckbox(
    inputId = "Ct_units_choice_SSS", label = "Specify catch units for each fleet?",
    shape = "round", outline = TRUE, status = "info")))
})
output$AdvancedSS_Ctunitsfleets_SSS<- renderUI({
  if(!is.null(input$Ct_units_choice_SSS)){
    if(input$Ct_units_choice_SSS){
      fluidRow(column(width=12, textInput("fleet_ct_units_SSS", "Enter catch units for each fleet (1=biomass; 2=numbers)", value="")))
    }
  }
})
output$AdvancedSS_retro_choice<- renderUI({
  # Toggle for retrospective runs (years are entered as negative offsets
  # from the final model year).
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "Retro_choice", label = "Do retrospective runs? Input minus from current year",
    shape = "round", outline = TRUE, status = "info")))
})
# Retrospective-run year inputs (negative offsets from the final model year).
# Bug fix: min/max were reversed (min=-1, max=-500 violates min <= max) and
# step was negative; numericInput expects a positive step, so the browser
# spinner/validation was broken. Range is now [-500, -1] with step 1.
output$AdvancedSS_retro_years <- renderUI({
  if(!is.null(input$Retro_choice)){
    if(input$Retro_choice){
      fluidRow(column(width=6, numericInput("first_retro_year", "1st retro year",
        value=-1, min=-500, max=-1, step=1)),
        column(width=6, numericInput("final_retro_year", "Last retro year",
        value=-10, min=-500, max=-1, step=1)))
    }
  }
})
output$AdvancedSS_retro_choice_user <- renderUI({
  # User-model-tab copy; shares inputId "Retro_choice" with the base control.
  fluidRow(column(width=6, prettyCheckbox(
    inputId = "Retro_choice", label = "Do retrospective runs? Input minus from current year",
    shape = "round", outline = TRUE, status = "info")))
})
# User-model-tab copy of the retrospective-year inputs.
# Bug fix (as in AdvancedSS_retro_years): min/max were reversed and step was
# negative; corrected to the valid range [-500, -1] with step 1.
output$AdvancedSS_retro_years_user <- renderUI({
  if(!is.null(input$Retro_choice)){
    if(input$Retro_choice){
      fluidRow(column(width=6, numericInput("first_retro_year", "1st retro year",
        value=-1, min=-500, max=-1, step=1)),
        column(width=6, numericInput("final_retro_year", "Last retro year",
        value=-10, min=-500, max=-1, step=1)))
    }
  }
})
output$AdvancedSS_Ltbin <- renderUI({
  # Length-bin setup for the SS data file. Bin width is inferred from the
  # spacing of two length-bin column names of the uploaded comps (columns 6
  # and 7); defaults to 2 when no length data are loaded. The default max
  # bin is an even value just above Linf*(1+0.2326) — the 0.2326 constant's
  # origin is not visible here; TODO confirm.
  # if(input$advance_ss_click){
  if(!is.null(rv.Lt$data)){bin.step<-as.numeric(colnames(rv.Lt$data)[7])-as.numeric(colnames(rv.Lt$data)[6])}
  if(is.null(rv.Lt$data)){bin.step<-2}
  fluidRow(column(width=4, numericInput("lt_bin_size", "bin size",
    value=bin.step, min=0, max=10000, step=1)),
    column(width=4, numericInput("lt_min_bin", "minimum bin",
    value=4, min=0, max=10000, step=0.01)),
    column(width=4, numericInput("lt_max_bin", "maximum bin",
    value=2*(round((Linf()+(Linf()*0.2326))/2))+2, min=0, max=10000, step=0.01)))
  # }
})
# File chooser for a CSV of likelihood-profile input values.
output$Profile_multi_values <- renderUI({
  csv_types <- c(
    'text/csv',
    'text/comma-separated-values',
    'text/tab-separated-values',
    'text/plain',
    '.csv'
  )
  file_widget <- fileInput('file_multi_profile', 'Profile input values',
                           accept = csv_types)
  fluidRow(column(width = 12, file_widget))
})
# roots <- getVolumes()()
###############################################
###############################################
###############################################
################# PARAMETERS ##################
###############################################
# Comma-separated string of 1s, one entry per fleet; the fleet count is the
# largest fleet number appearing in the length, age, or index data.
# Bug fix: is.null() takes a single argument — the old guard called it with
# four arguments (a runtime error) and then indexed possibly-NULL data
# (NULL[,3] errors too). Each source is now checked individually and only
# non-NULL sources contribute fleet numbers.
FleetNs<-reactive({
  fleet_cols<-c(
    if(!is.null(rv.Lt$data)) rv.Lt$data[,3],
    if(!is.null(rv.Age$data)) rv.Age$data[,3],
    if(!is.null(rv.Index$data)) rv.Index$data[,3]
  )
  if(length(fleet_cols)==0) return(NULL)
  fleetnum<-rep(1,max(fleet_cols))
  paste(as.character(fleetnum), collapse=",")
})
# Number of ages in the model: ceiling(5.4/M) as a rule-of-thumb maximum age
# (origin of the 5.4 constant not visible here — TODO confirm), overridden by
# the largest uploaded age bin when that is greater.
# Robustness fix: each test is wrapped in isTRUE() because a NULL input
# (control not rendered) makes !is.na(NULL) a zero-length logical, which is
# an error inside if(); the all-NULL guard alone does not cover mixed cases.
Nages<-reactive({
  Nages<-NA
  if(all(c(is.null(input$M_f),is.null(input$M_f_fix),is.null(input$M_f_mean),is.null(input$M_f_mean_sss),is.null(rv.Age$data)))) return(NULL)
  if(isTRUE(!is.na(input$M_f))) {Nages<-ceiling(5.4/input$M_f)}
  if(isTRUE(!is.na(input$M_f_fix))) {Nages<-ceiling(5.4/input$M_f_fix)}
  if(isTRUE(!is.na(input$M_f_mean))) {Nages<-ceiling(5.4/input$M_f_mean)}
  if(isTRUE(!is.na(input$M_f_mean_sss))) {Nages<-ceiling(5.4/input$M_f_mean_sss)}
  if(!is.null(rv.Age$data))
  {
    # Age bins are read from the column names of column 9 onward.
    Nages_in<-max(as.numeric(colnames(rv.Age$data[,9:ncol(rv.Age$data)])))
    if(!is.na(Nages)&Nages_in>Nages){Nages<-Nages_in}
    if(is.na(Nages)){Nages<-Nages_in}
  }
  Nages
})
# Female natural mortality, taken from whichever input panel is active
# (estimated, fixed, prior mean, or SSS prior mean); later panels win.
# Robustness fix: isTRUE() wraps each test because !is.na(NULL) is a
# zero-length logical, which errors inside if() when a control is absent.
M_f_in<-reactive({
  M_f_in<-NA
  if(all(c(is.null(input$M_f),is.null(input$M_f_fix),is.null(input$M_f_mean),is.null(input$M_f_mean_sss)))) return(NULL)
  if(isTRUE(!is.na(input$M_f))) {M_f_in<-input$M_f}
  if(isTRUE(!is.na(input$M_f_fix))) {M_f_in<-input$M_f_fix}
  if(isTRUE(!is.na(input$M_f_mean))) {M_f_in<-input$M_f_mean}
  if(isTRUE(!is.na(input$M_f_mean_sss))) {M_f_in<-input$M_f_mean_sss}
  M_f_in
})
# Male natural mortality, taken from whichever male-parameter panel is
# active; NA when no male-specific value is supplied. any() on a zero-length
# logical is FALSE, so NULL checkboxes/inputs are skipped safely.
M_m_in<-reactive({
  inputs_all_null <- is.null(input$M_m) && is.null(input$M_m_fix) &&
    is.null(input$M_m_mean) && is.null(input$M_m_mean_sss)
  if (inputs_all_null) return(NULL)
  # Keep the fallback unless this panel's flag is on and its value is usable.
  pick <- function(flag, value, fallback) {
    if (any(flag & !is.na(value))) value else fallback
  }
  out <- NA
  out <- pick(input$male_parms, input$M_m, out)
  out <- pick(input$male_parms_fix, input$M_m_fix, out)
  out <- pick(input$male_parms_est, input$M_m_mean, out)
  out <- pick(input$male_parms_SSS, input$M_m_mean_sss, out)
  out
})
# Female asymptotic length (VBGF Linf) from whichever panel is active.
# Robustness fix: isTRUE() wraps each test because !is.na(NULL) is a
# zero-length logical, which errors inside if() when a control is absent.
Linf<-reactive({
  Linf<-NA
  if(all(c(is.null(input$Linf_f),is.null(input$Linf_f_fix),is.null(input$Linf_f_mean),is.null(input$Linf_f_mean_sss)))) return(NULL)
  if(isTRUE(!is.na(input$Linf_f))) {Linf<-input$Linf_f}
  if(isTRUE(!is.na(input$Linf_f_fix))) {Linf<-input$Linf_f_fix}
  if(isTRUE(!is.na(input$Linf_f_mean))) {Linf<-input$Linf_f_mean}
  if(isTRUE(!is.na(input$Linf_f_mean_sss))) {Linf<-input$Linf_f_mean_sss}
  Linf
})
Linf_m_in<-reactive({
  # Male Linf; NA when no male-specific value is active (the VBGF plot then
  # mirrors the female value). any() on a zero-length logical is FALSE, so
  # NULL checkboxes/inputs are skipped safely.
  Linf_m_in<-NA
  if(all(c(is.null(input$Linf_m),is.null(input$Linf_m_fix),is.null(input$Linf_m_mean),is.null(input$Linf_m_mean_sss)))) return(NULL)
  if(any(input$male_parms&!is.na(input$Linf_m))) {Linf_m_in<-input$Linf_m}
  if(any(input$male_parms_fix&!is.na(input$Linf_m_fix))) {Linf_m_in<-input$Linf_m_fix}
  if(any(input$male_parms_est&!is.na(input$Linf_m_mean))) {Linf_m_in<-input$Linf_m_mean}
  if(any(input$male_parms_SSS&!is.na(input$Linf_m_mean_sss))) {Linf_m_in<-input$Linf_m_mean_sss}
  Linf_m_in
})
# Female VBGF growth coefficient k from whichever panel is active.
# Robustness fix: isTRUE() wraps each test because !is.na(NULL) is a
# zero-length logical, which errors inside if() when a control is absent.
k_vbgf<-reactive({
  k_vbgf<-NA
  if(all(c(is.null(input$k_f),is.null(input$k_f_fix),is.null(input$k_f_mean),is.null(input$k_f_mean_sss)))) return(NULL)
  if(isTRUE(!is.na(input$k_f))) {k_vbgf<-input$k_f}
  if(isTRUE(!is.na(input$k_f_fix))) {k_vbgf<-input$k_f_fix}
  if(isTRUE(!is.na(input$k_f_mean))) {k_vbgf<-input$k_f_mean}
  if(isTRUE(!is.na(input$k_f_mean_sss))) {k_vbgf<-input$k_f_mean_sss}
  k_vbgf
})
#Process life history input for plots
k_vbgf_m_in<-reactive({
  # Male VBGF k; NA when no male-specific value is active. any() on a
  # zero-length logical is FALSE, so NULL inputs are skipped safely.
  k_vbgf_m_in<-NA
  if(all(c(is.null(input$k_m),is.null(input$k_m_fix),is.null(input$k_m_mean),is.null(input$k_m_mean_sss)))) return(NULL)
  if(any(input$male_parms&!is.na(input$k_m))) {k_vbgf_m_in<-input$k_m}
  if(any(input$male_parms_fix&!is.na(input$k_m_fix))) {k_vbgf_m_in<-input$k_m_fix}
  if(any(input$male_parms_est&!is.na(input$k_m_mean))) {k_vbgf_m_in<-input$k_m_mean}
  if(any(input$male_parms_SSS&!is.na(input$k_m_mean_sss))) {k_vbgf_m_in<-input$k_m_mean_sss}
  k_vbgf_m_in
})
# Female VBGF t0 from whichever panel is active.
# Robustness fix: isTRUE() wraps each test because !is.na(NULL) is a
# zero-length logical, which errors inside if() when a control is absent.
t0_vbgf<-reactive({
  t0_vbgf<-NA
  if(all(c(is.null(input$t0_f),is.null(input$t0_f_fix),is.null(input$t0_f_mean),is.null(input$t0_f_mean_sss)))) return(NULL)
  if(isTRUE(!is.na(input$t0_f))) {t0_vbgf<-input$t0_f}
  if(isTRUE(!is.na(input$t0_f_fix))) {t0_vbgf<-input$t0_f_fix}
  if(isTRUE(!is.na(input$t0_f_mean))) {t0_vbgf<-input$t0_f_mean}
  if(isTRUE(!is.na(input$t0_f_mean_sss))) {t0_vbgf<-input$t0_f_mean_sss}
  t0_vbgf
})
t0_vbgf_m_in<-reactive({
  # Male VBGF t0; NA when no male-specific value is active. any() on a
  # zero-length logical is FALSE, so NULL inputs are skipped safely.
  t0_vbgf_m_in<-NA
  if(all(c(is.null(input$t0_m),is.null(input$t0_m_fix),is.null(input$t0_m_mean),is.null(input$t0_m_mean_sss)))) return(NULL)
  if(any(input$male_parms&!is.na(input$t0_m))) {t0_vbgf_m_in<-input$t0_m}
  if(any(input$male_parms_fix&!is.na(input$t0_m_fix))) {t0_vbgf_m_in<-input$t0_m_fix}
  if(any(input$male_parms_est&!is.na(input$t0_m_mean))) {t0_vbgf_m_in<-input$t0_m_mean}
  if(any(input$male_parms_SSS&!is.na(input$t0_m_mean_sss))) {t0_vbgf_m_in<-input$t0_m_mean_sss}
  t0_vbgf_m_in
})
# Female length at 50% maturity from whichever panel is active.
# Robustness fix: isTRUE() wraps each test because !is.na(NULL) is a
# zero-length logical, which errors inside if() when a control is absent.
L50<-reactive({
  L50<-NA
  if(all(c(is.null(input$L50_f),is.null(input$L50_f_fix),is.null(input$L50_f_est),is.null(input$L50_f_sss)))) return(NULL)
  if(isTRUE(!is.na(input$L50_f))) {L50<-input$L50_f}
  if(isTRUE(!is.na(input$L50_f_fix))) {L50<-input$L50_f_fix}
  if(isTRUE(!is.na(input$L50_f_est))) {L50<-input$L50_f_est}
  if(isTRUE(!is.na(input$L50_f_sss))) {L50<-input$L50_f_sss}
  L50
})
# Female length at 95% maturity from whichever panel is active.
# Robustness fix: isTRUE() wraps each test because !is.na(NULL) is a
# zero-length logical, which errors inside if() when a control is absent.
L95<-reactive({
  L95<-NA
  if(all(c(is.null(input$L95_f),is.null(input$L95_f_fix),is.null(input$L95_f_est),is.null(input$L95_f_sss)))) return(NULL)
  if(isTRUE(!is.na(input$L95_f))) {L95<-input$L95_f}
  if(isTRUE(!is.na(input$L95_f_fix))) {L95<-input$L95_f_fix}
  if(isTRUE(!is.na(input$L95_f_est))) {L95<-input$L95_f_est}
  if(isTRUE(!is.na(input$L95_f_sss))) {L95<-input$L95_f_sss}
  L95
})
#############
### PLOTS ###
#############
##################
### CATCH PLOT ###
##################
# Reveal the section label once catch data are loaded.
observeEvent(req(!is.null(rv.Ct$data)), {
  shinyjs::show(output$catch_plots_label<-renderText({"Removal history"}))
})
observeEvent(req(!is.null(rv.Ct$data)), {
  output$Ctplot_it<-renderUI({
    if(!is.null(rv.Ct$data))
    {
      output$Ctplot <- renderPlot({
        if (is.null(rv.Ct$data)) return(NULL)
        # Wide catch table (year in column 1, one column per fleet) is
        # pivoted long and plotted per fleet.
        # NOTE(review): aes_string() is deprecated in current ggplot2;
        # consider aes(.data[[...]]) if the package is upgraded.
        rv.Ct$data %>%
          pivot_longer(-1, names_to = "Fleet", values_to = "catch") %>%
          ggplot(aes_string(names(.)[1], "catch", color = "Fleet")) +
          geom_point() +
          geom_line(lwd=1.5) +
          ylab("Removals") +
          xlab("Year") +
          scale_color_viridis_d()
      })
      plotOutput("Ctplot")
    }
  })
})
##########################
### LENGTH COMPS PLOTS ###
##########################
# Reveal the section label once length-composition data are loaded.
observeEvent(req(!is.null(rv.Lt$data)), {
  shinyjs::show(output$lt_comp_plots_label<-renderText({"Length compositions"}))
})
# Length-composition plot: comps pivoted long, one line per year, faceted by
# sex x fleet, with a vertical line at female length-at-50%-maturity.
# Bug fix: geom_vline was given the reactive object L50 itself instead of
# its value; the reactive must be called (L50()) for the intended maturity
# line to be drawn.
observeEvent(req(!is.null(rv.Lt$data)), {
  output$Ltplot_it<-renderUI({
    if(!is.null(rv.Lt$data))
    {
      output$Ltplot<-renderPlot({
        if (is.null(rv.Lt$data)) return(NULL)
        rv.Lt$data %>%
          rename_all(tolower) %>%
          dplyr::select(-nsamps) %>%
          pivot_longer(c(-year, -fleet, -sex)) %>%
          # Bin labels like "X10" are stripped to their numeric value.
          mutate(Year = factor(year),
            name = as.numeric(gsub("[^0-9.-]", "", name))) %>%
          ggplot(aes(name, value, color=Year)) +
          geom_line() +
          facet_grid(sex~fleet, scales="free_y",labeller = label_both) +
          xlab("Length bin") +
          ylab("Frequency") +
          scale_fill_viridis_d()+
          geom_vline(xintercept = L50())
      })
      plotOutput("Ltplot")
    }
  })
})
# observeEvent(req(!is.null(input$file1)), {
# output$Ltplot<-renderPlot({
# inFile<- input$file1
# # if (is.null(inFile)) {
# # return(NULL)
# # shinyjs::hide("Ltplot")}
# # else{
# Lt.comp.data<-read.csv(inFile$datapath,check.names=FALSE)
# lt.dat.plot<-(Lt.comp.data)[,c(-4)]
# dat.gg<-melt(lt.dat.plot,id=colnames(lt.dat.plot)[1:3])
# colnames(dat.gg)<-c("year","fleet","sex","bin","ltnum")
# ggplot(dat.gg,aes(bin,ltnum,fill=factor(fleet)))+
# geom_col(color="white",position="dodge")+
# #geom_col(fill="#236192",color="white")+
# facet_wrap(~year,scales="free_y")+
# xlab("Length bin")+
# ylab("Frequency")+
# labs(fill="Fleet")+
# scale_fill_viridis(discrete=TRUE, option="viridis")
# #scale_x_discrete(breaks=c(1,5,10,20),labels=as.character(levels(dat.gg$bin))[c(1,5,10,20)])
# #scale_fill_brewer(palette = "BuPu")
# # }
# })
# })
#################
### AGE PLOTS ###
#################
# Reveal the age-composition section labels once age data are loaded.
observeEvent(req(!is.null(rv.Age$data)), {
  shinyjs::show(output$marginal_age_comp_plots_label<-renderText({"Marginal age compositions"}))
})
observeEvent(req(!is.null(rv.Age$data)), {
  shinyjs::show(output$conditional_age_comp_plots_label<-renderText({"Conditional age at length"}))
})
observeEvent(req(!is.null(rv.Age$data)), {
  # Split uploaded ages on Lbin_hi: negative values flag marginal comps,
  # non-negative values flag conditional age-at-length rows.
  marginal_ages<-subset(rv.Age$data,Lbin_hi<0)
  Cond_ages<-subset(rv.Age$data,Lbin_hi>=0)
  output$Ageplot_it_marginal<-renderUI({
    if(!is.null(rv.Age$data))
    {
      output$Ageplot_marginal<-renderPlot({
        #inFile_age <- rv.Age$data
        # if (is.null(rv.Age$data)) return(NULL)
        if (nrow(marginal_ages)==0) return(NULL)
        # Pivot age bins long (bin labels reduced to numbers) and draw one
        # line per year, faceted by sex x fleet.
        # rv.Age$data %>%
        marginal_ages %>%
          rename_all(tolower) %>%
          dplyr::select(-nsamps,-lbin_hi) %>%
          pivot_longer(c(-year, -fleet, -sex, -lbin_low)) %>%
          mutate(Year = factor(year),
            name = as.numeric(gsub("[^0-9.-]", "", name))) %>%
          ggplot(aes(name, value, color=Year)) +
          geom_line() +
          # geom_col(position="dodge") +
          #facet_wrap(sex~year, scales="free_y",ncol=5) +
          facet_grid(sex~fleet, scales="free_y",labeller = label_both) +
          #scale_y_continuous(limits=c(0,max(colSums(rv.Age$data[-1,7:ncol(rv.Age$data)]))))+
          #scale_y_continuous(limits=c(0,20))+
          xlab("Age bin") +
          ylab("Frequency") +
          scale_fill_viridis_d()
      })
      plotOutput("Ageplot_marginal")
    }
  })
  output$Ageplot_it_cond<-renderUI({
    if(!is.null(rv.Age$data))
    {
      output$Ageplot_conditional<-renderPlot({
        # if (is.null(rv.Age$data)) return(NULL)
        if (nrow(Cond_ages)==0) return(NULL)
        # Melt to long form and scatter observed (positive) age/length cells.
        Cond_ages_plots<-melt(Cond_ages[,c(1,3,4,7,9:ncol(Cond_ages))],id.vars=c("Year","Fleet","Sex","Lbin_hi"))
        Cond_ages_plots_pos<-subset(Cond_ages_plots,value>0)
        ggplot(Cond_ages_plots_pos,aes(x=as.numeric(variable),y=as.numeric(Lbin_hi),color=Year))+
          geom_point()+
          facet_grid(vars(Sex),vars(Fleet),labeller = label_both)+
          xlab("Age bin")+
          ylab("Length bin")
      })
      plotOutput("Ageplot_conditional")
    }
  })
})
# output$Ageplot <- renderPlot({
# inFile_age <- rv.Age$data
# if (is.null(inFile_age)) return(NULL)
# rv.Age$data %>%
# pivot_longer(-1, names_to = "year", values_to = "ltnum") %>%
# rename(bin = Bins) %>%
# ggplot(aes(bin, ltnum)) +
# geom_col(fill="#1D252D", color="white") +
# facet_wrap(~year) +
# xlab("Age bin") +
# ylab("Frequency")
# })
##################
### INDEX PLOT ###
##################
# Reveal the index section label once abundance-index data are loaded.
observeEvent(req(!is.null(rv.Index$data)), {
  shinyjs::show(output$index_plots_label<-renderText({"Indices of Abundance"}))
})
# Abundance-index plot: each fleet's index is z-scored so multiple surveys
# share one axis, with lognormal 95% interval bars.
# Bug fix: the lower interval quantile was mistyped as 0.0275; the 95%
# lognormal interval pairs 0.025 with 0.975.
# NOTE(review): the bounds are computed from the z-scored index, so
# log(Index) is NaN for negative z-scores — confirm whether the intervals
# should be built from the raw index instead.
observeEvent(req(!is.null(rv.Index$data)), {
  output$Indexplot_it<-renderUI({
    if(!is.null(rv.Index$data))
    {
      output$Indexplot <- renderPlot({
        if (is.null(rv.Index$data)) return(NULL)
        plot.Index<-rv.Index$data
        plot.Index[,3]<-as.factor(plot.Index[,3])
        plot.Index.zscore<-list()
        for(i in seq_along(unique(plot.Index$Fleet)))
        {
          plot.Index.temp<-plot.Index[plot.Index$Fleet %in% unique(plot.Index$Fleet)[i],]
          plot.Index.temp$Index<-(plot.Index.temp$Index-mean(plot.Index.temp$Index))/sd(plot.Index.temp$Index)
          plot.Index.zscore[[i]]<-plot.Index.temp
        }
        plot.Index.zs<-do.call("rbind", plot.Index.zscore)
        ggplot(plot.Index.zs,aes(x=Year,y=Index,group=Fleet, colour=Fleet)) +
          geom_line(lwd=1.1) +
          geom_errorbar(aes(ymin=qlnorm(0.025,log(Index),CV),ymax=qlnorm(0.975,log(Index),CV),group=Fleet),width=0,size=1)+
          geom_point(aes(colour=Fleet),size=4) +
          ylab("Z-score") +
          xlab("Year") +
          scale_color_viridis_d()
      })
      plotOutput("Indexplot")
    }
  })
})
#####################
### Plot M by age ###
#####################
output$Mplot<-renderPlot({
  # Cohort decline under natural mortality, by sex. Male M defaults to the
  # female value unless one of the male-parameter panels is active.
  # NOTE(review): the 1e-15 offset presumably guards a degenerate M of
  # exactly 0 — confirm. Also note the OR chain errors if every male_parms*
  # input is NULL (zero-length condition) — assumed at least one is rendered.
  mf.in = M_f_in()+0.000000000000001
  mm.in = M_f_in()+0.000000000000001
  # if(input$male_parms|input$male_parms_fix)
  if(input$male_parms|input$male_parms_SSS|input$male_parms_fix|input$male_parms_est)
  {
    mm.in = M_m_in()+0.000000000000001
  }
  if(any(is.na(c(mf.in, mm.in)))|any(is.null(c(mf.in, mm.in)))) return(NULL)
  # Survivorship-at-age N(a) = exp(-M*a) for ages 0..Nages.
  Female_M = data.frame(Ages = 0:Nages(), PopN = exp(-mf.in * 0:Nages()), Sex="Female")
  Male_M = data.frame(Ages = 0:Nages(), PopN=exp(-mm.in * 0:Nages()), Sex="Male")
  M_sexes <- rbind(Female_M, Male_M)
  # In-panel annotation showing the derived maximum age.
  Nage_4_plot <- grobTree(textGrob(paste0("Max age =", Nages()), x=0.1, y=0.95, hjust=0,
    gp=gpar(col="darkblue", fontsize=12, fontface="italic")))
  ggplot(M_sexes,aes(Ages, PopN, color=Sex))+
    geom_line(aes(linetype=Sex), lwd=2)+
    ylab("Cohort decline by M")+
    annotation_custom(Nage_4_plot)
})
##############################
### Plot VBGF and maturity ###
##############################
output$VBGFplot<-renderPlot({
  # Growth curves by sex (male parameters fall back to female values unless
  # a male panel is active), with the female maturity points (L50/L95)
  # overlaid at the ages back-calculated via VBGF.age().
  f_Linf = m_Linf = Linf()
  f_k = m_k = k_vbgf()
  f_t0 = m_t0 = t0_vbgf()
  f_L50 = L50()
  f_L95 = L95()
  maxage = Nages()
  if(any(input$male_parms,input$male_parms_SSS,input$male_parms_fix,input$male_parms_est))
  {
    m_Linf = Linf_m_in()
    m_k = k_vbgf_m_in()
    m_t0 = t0_vbgf_m_in()
  }
  # Comparing to "FALSE" works because FALSE=="FALSE" is TRUE in R; i.e.
  # proceed only when all three growth parameters are available.
  if(any(is.na(c(f_Linf, f_k, f_t0)))=="FALSE"){
    vbgf_female = data.frame(Age = c(f_t0:Nages()),
      Length = VBGF(f_Linf, f_k, f_t0, c(f_t0:Nages())), Sex="Female")
    vbgf_male = data.frame(Age = f_t0:Nages(),
      Length=VBGF(m_Linf, m_k, m_t0, c(f_t0:Nages())), Sex="Male")
    rbind(vbgf_female,vbgf_male) %>%
      ggplot(aes(Age, Length, color=Sex)) +
      geom_line(aes(linetype=Sex), lwd=2) -> vbgf.plot
    # Add maturity markers only when both L50 and L95 are available.
    if(any(is.na(c(f_L50, f_L95)))=="FALSE"){
      age.mat = data.frame(Age = VBGF.age(f_Linf, f_k, f_t0, c(f_L50, f_L95)),
        Length = c(f_L50, f_L95), Sex="Female")
      vbgf.plot +
        geom_point(data = age.mat, aes(Age, Length), color = "darkorange", size=6) +
        geom_text(data = age.mat,label=c("Lmat50%", "Lmat95%"),
          nudge_x = -0.1 * Nages(), color="black") -> vbgf.plot
    }
    vbgf.plot
  }
})
###################
### Selectivity ###
###################
# observeEvent(req(input$Sel50,input$Selpeak), {
# shinyjs::show(output$Sel_plots_label<-renderText({"Selectivity"}))
# })
#h4("Selectivity")
output$Dep_plot_title<-renderUI({
  # Header for the depletion prior, shown only in the catch-only (SSS)
  # configuration: an early tab, catch loaded, and no length/age/index data
  # or user-supplied model.
  if(((as.numeric(input$tabs)*1)/1)<4&is.null(rv.Lt$data)&!is.null(rv.Ct$data)&is.null(rv.Age$data)&is.null(rv.Index$data)&any(is.null(input$user_model),!input$user_model)){
    h4("Relative Stock Status Prior")
  }
})
output$Dep_plot_it<-renderUI({
  # Histogram of 100k draws from the chosen relative-stock-status prior;
  # rendered only in the same catch-only (SSS) configuration as the header.
  # rbeta.ab/rtruncnorm are helpers/packages loaded elsewhere in the app.
  # NOTE(review): for "uniform", Depl_mean_sss/Depl_SD_sss are used as the
  # min/max arguments of runif — confirm that is the intended encoding.
  if(((as.numeric(input$tabs)*1)/1)<4&is.null(rv.Lt$data)&!is.null(rv.Ct$data)&is.null(rv.Age$data)&is.null(rv.Index$data)&any(is.null(input$user_model),!input$user_model)){
    output$Depletion_plot <- renderPlot({
      if(!is.na(input$status_year)&!is.na(input$Depl_mean_sss))
      {
        if(input$Depl_prior_sss=="beta"){dep.hist.sss<-data.frame(draws=rbeta.ab(100000,input$Depl_mean_sss,input$Depl_SD_sss,0,1))}
        if(input$Depl_prior_sss=="lognormal"){dep.hist.sss<-data.frame(draws=rlnorm(100000,log(input$Depl_mean_sss),input$Depl_SD_sss))}
        if(input$Depl_prior_sss=="truncated normal"){dep.hist.sss<-data.frame(draws=rtruncnorm(100000,0,1,input$Depl_mean_sss,input$Depl_SD_sss))}
        if(input$Depl_prior_sss=="uniform"){dep.hist.sss<-data.frame(draws=runif(100000,input$Depl_mean_sss,input$Depl_SD_sss))}
        if(input$Depl_prior_sss=="no prior"){NULL}
        Depletion_plot<-gghistogram(dep.hist.sss, x = "draws", fill = "purple")
        Depletion_plot
      }
    })
    plotOutput("Depletion_plot")
  }
})
# Selectivity preview per fleet. Fleet values arrive as comma-separated
# strings; logistic selectivity is emulated with the SS double-normal
# (pattern 24) by fixing the descending-limb parameters flat.
# Bug fix: the axis labels were swapped — x is the length bin, y is the
# selectivity value. The duplicated parse/validate/plot code of the two
# shape branches is also factored into local helpers.
output$Selplot <- renderPlot({
  if(input$Sel_choice=="Logistic"&any(any(input$Sel50[1]=="",is.null(input$Sel50)),any(input$Selpeak[1]=="",is.null(input$Selpeak)))) return(NULL)
  # Parse a comma-separated input string into a numeric vector.
  parse_num <- function(x) as.numeric(trimws(unlist(strsplit(x, ","))))
  # Build the per-fleet curves via doubleNorm24.sel and return the ggplot.
  make_sel_plot <- function(Sel50, Selpeak, PeakDesc, LtPeakFinal, FinalSel) {
    Sel.out <- data.frame()
    for(ii in 1:length(Sel50))
    {
      tmp <- doubleNorm24.sel(Sel50=Sel50[ii],Selpeak=Selpeak[ii],PeakDesc=PeakDesc[ii],LtPeakFinal=LtPeakFinal[ii],FinalSel=FinalSel[ii])
      Sel.out <- rbind(Sel.out, data.frame(Bin=tmp[,1],Sel=tmp[,2],Fleet=paste0("Fleet ",ii)))
    }
    ggplot(Sel.out,aes(Bin,Sel,colour=Fleet)) +
      geom_line(lwd=1.5) +
      ylab("Selectivity") +
      xlab("Length bin") +
      scale_color_viridis_d()
  }
  if(input$Sel_choice=="Logistic")
  {
    if(all(length(parse_num(input$Sel50))==length(parse_num(input$Selpeak)),
      all(input$Sel50!=""),
      all(!is.null(input$Sel50)),
      all(input$Selpeak!=""),
      all(!is.null(input$Selpeak))))
    {
      Selpeak<-parse_num(input$Selpeak)
      # Logistic = double-normal with a flat, fully selected descending limb.
      selplot.out<-make_sel_plot(parse_num(input$Sel50), Selpeak,
        rep(10000,length(Selpeak)), rep(0.0001,length(Selpeak)), rep(0.999,length(Selpeak)))
    }
  }
  if(input$Sel_choice=="Dome-shaped")
  {
    # Dome-shaped requires all five per-fleet parameter lists, equal length.
    if(all(length(parse_num(input$Sel50))==length(parse_num(input$Selpeak)),
      length(parse_num(input$Sel50))==length(parse_num(input$PeakDesc)),
      length(parse_num(input$Sel50))==length(parse_num(input$LtPeakFinal)),
      length(parse_num(input$Sel50))==length(parse_num(input$FinalSel)),
      all(input$Sel50!=""),
      all(!is.null(input$Sel50)),
      all(input$Selpeak!=""),
      all(!is.null(input$Selpeak))))
    {
      selplot.out<-make_sel_plot(parse_num(input$Sel50), parse_num(input$Selpeak),
        parse_num(input$PeakDesc), parse_num(input$LtPeakFinal), parse_num(input$FinalSel))
    }
  }
  # Return the plot only if some branch built it; otherwise render nothing.
  if(!is.null(get0("selplot.out"))){return(selplot.out)}
  else(return(NULL))
})
# SSS-tab selectivity preview; mirrors output$Selplot but reads the *_sss
# inputs. Bug fix: the axis labels were swapped — x is the length bin, y is
# the selectivity value. Shared logic factored into local helpers.
output$Selplot_SSS <- renderPlot({
  if(input$Sel_choice_sss=="Logistic"&any(any(input$Sel50_sss[1]=="",is.null(input$Sel50_sss)),any(input$Selpeak_sss[1]=="",is.null(input$Selpeak_sss)))) return(NULL)
  # Parse a comma-separated input string into a numeric vector.
  parse_num <- function(x) as.numeric(trimws(unlist(strsplit(x, ","))))
  # Build the per-fleet curves via doubleNorm24.sel and return the ggplot.
  make_sel_plot <- function(Sel50, Selpeak, PeakDesc, LtPeakFinal, FinalSel) {
    Sel.out <- data.frame()
    for(ii in 1:length(Sel50))
    {
      tmp <- doubleNorm24.sel(Sel50=Sel50[ii],Selpeak=Selpeak[ii],PeakDesc=PeakDesc[ii],LtPeakFinal=LtPeakFinal[ii],FinalSel=FinalSel[ii])
      Sel.out <- rbind(Sel.out, data.frame(Bin=tmp[,1],Sel=tmp[,2],Fleet=paste0("Fleet ",ii)))
    }
    ggplot(Sel.out,aes(Bin,Sel,colour=Fleet)) +
      geom_line(lwd=1.5) +
      ylab("Selectivity") +
      xlab("Length bin") +
      scale_color_viridis_d()
  }
  if(input$Sel_choice_sss=="Logistic")
  {
    if(all(length(parse_num(input$Sel50_sss))==length(parse_num(input$Selpeak_sss)),
      all(input$Sel50_sss!=""),
      all(!is.null(input$Sel50_sss)),
      all(input$Selpeak_sss!=""),
      all(!is.null(input$Selpeak_sss))))
    {
      Selpeak<-parse_num(input$Selpeak_sss)
      # Logistic = double-normal with a flat, fully selected descending limb.
      selplot.out<-make_sel_plot(parse_num(input$Sel50_sss), Selpeak,
        rep(10000,length(Selpeak)), rep(0.0001,length(Selpeak)), rep(0.999,length(Selpeak)))
    }
  }
  if(input$Sel_choice_sss=="Dome-shaped")
  {
    if(all(length(parse_num(input$Sel50_sss))==length(parse_num(input$Selpeak_sss)),
      length(parse_num(input$Sel50_sss))==length(parse_num(input$PeakDesc_sss)),
      length(parse_num(input$Sel50_sss))==length(parse_num(input$LtPeakFinal_sss)),
      length(parse_num(input$Sel50_sss))==length(parse_num(input$FinalSel_sss)),
      all(input$Sel50_sss!=""),
      all(!is.null(input$Sel50_sss)),
      all(input$Selpeak_sss!=""),
      all(!is.null(input$Selpeak_sss))))
    {
      selplot.out<-make_sel_plot(parse_num(input$Sel50_sss), parse_num(input$Selpeak_sss),
        parse_num(input$PeakDesc_sss), parse_num(input$LtPeakFinal_sss), parse_num(input$FinalSel_sss))
    }
  }
  # Return the plot only if some branch built it; otherwise render nothing.
  if(!is.null(get0("selplot.out"))){return(selplot.out)}
  else(return(NULL))
})
#############################################
### END PLOTS ###
#############################################
#############################################
######## PREPARE FILES AND RUN SSS ##########
#############################################
# Observer: prepare model files and run SSS (Simple Stock Synthesis).
# Triggered by the "run_SSS" button. Workflow:
#   1. Rebuild the scenario folder from the bundled SSS template (sssexample_BH).
#   2. Read template data/control files with r4ss, overwrite entries from UI
#      inputs (catches, depletion index, length bins, biology, selectivity).
#   3. Update the forecast file (reference points, catch buffers).
#   4. Translate UI prior choices into SSS prior vectors and call SSS().
#   5. Render prior/posterior, growth, OFL, and ABC plots from SSS_out.DMP.
SSS.run<-observeEvent(input$run_SSS,{
show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[1],text="Create model files")
# progress <- shiny::Progress$new(session, min=1, max=2)
# on.exit(progress$close())
# progress$set(message = 'Model run in progress',
# detail = '')
# for (i in 1:2) {
# progress$set(value = i)
# Sys.sleep(0.5)
# }
#Copy and move files: delete any previous run of this scenario, then refresh
#the folder from the SSS template so each run starts from a clean state.
if(file.exists(paste0("Scenarios/",input$Scenario_name)))
{
unlink(paste0("Scenarios/",input$Scenario_name),recursive=TRUE)
# file.remove(paste0(getwd(),"/Scenarios/",input$Scenario_name))
}
#if(input$)
{
file.copy(paste0("SSS_files/sssexample_BH"),paste0("Scenarios"),recursive=TRUE,overwrite=TRUE)
file.rename(paste0("Scenarios/sssexample_BH"), paste0("Scenarios/",input$Scenario_name))
}
#if()
# {
# file.copy(paste0(getwd(),"/SSS_files/sssexample_RickPow"),paste0(getwd(),"/Scenarios"),recursive=TRUE,overwrite=TRUE)
# file.rename(paste0(getwd(),"/Scenarios/sssexample_RickPow"), paste0(getwd(),"/Scenarios/",input$Scenario_name))
# }
#Read template data and control files (r4ss list representations)
data.file<-SS_readdat(paste0("Scenarios/",input$Scenario_name,"/sss_example.dat"))
ctl.file<-SS_readctl(paste0("Scenarios/",input$Scenario_name,"/sss_example.ctl"),use_datlist = TRUE, datlist=data.file)
#Read, edit then write new DATA file
data.file$styr<-input$styr
data.file$endyr<-input$endyr
data.file$Nages<-Nages()
#Catches
Catch.data<-rv.Ct$data
catch.dep.fleets<-ncol(Catch.data)
data.file$Nfleets<-catch.dep.fleets
#If an index is supplied, fleet count is the larger of catch fleets vs index
#fleets; when they tie, add one extra fleet for the depletion "survey".
if(!is.null(rv.Index$data))
{
index.fleets<-max(rv.Index$data$Fleet)
if(index.fleets>catch.dep.fleets) {data.file$Nfleets<-index.fleets}
if(index.fleets==catch.dep.fleets) {data.file$Nfleets<-index.fleets+1}
if(index.fleets<catch.dep.fleets) {data.file$Nfleets<-catch.dep.fleets}
}
#More than one fishing fleet: replicate template fleet rows and relabel
if((data.file$Nfleets-1)>1){
for(i in 1:(data.file$Nfleets-2))
{
data.file$fleetinfo<-rbind(data.file$fleetinfo,data.file$fleetinfo[1,])
data.file$CPUEinfo<-rbind(data.file$CPUEinfo,data.file$CPUEinfo[1,])
}
data.file$fleetinfo$fleetname<-c(paste0("Fishery",1:(catch.dep.fleets-1)),"Depl")
data.file$fleetinfo$type[c(2,data.file$Nfleets)]<-c(1,3)
data.file$fleetinfo$surveytiming[c(2,data.file$Nfleets)]<-c(-1,0.1)
data.file$CPUEinfo[,1]<-1:data.file$Nfleets
data.file$CPUEinfo[c(2,data.file$Nfleets),2]<-c(1,34)
data.file$CPUE$index<-data.file$Nfleets
}
#Build the catch table: one -999 equilibrium row (catch 0) per fleet, then
#the observed catches with a fixed 0.01 SE.
year.in<-Catch.data[,1]
catch.cols<-colnames(data.file$catch)
catch_temp<-list()
for(i in 1:(data.file$Nfleets-1))
{
catch_temp[[i]]<-data.frame(
c(-999,year.in),
rep(1,length(year.in)+1),
rep(i,length(year.in)+1),
c(0,Catch.data[,i+1]),
rep(0.01,length(year.in)+1)
)
}
data.file$catch<-list.rbind(catch_temp)
colnames(data.file$catch)<-catch.cols
#Relative stock status: depletion "index" observed at start year and status year
data.file$CPUE$year<-c(input$styr,input$status_year)
#Length composition data: population bins derived from female Linf
if(input$Linf_f_mean_sss>30){data.file$binwidth<-2}
data.file$minimum_size<-floor(input$Linf_f_mean_sss/10)
data.file$maximum_size<-ceiling(input$Linf_f_mean_sss+(input$Linf_f_mean_sss*0.1))
data.file$lbin_vector<-seq(data.file$minimum_size,data.file$maximum_size,data.file$binwidth)
data.file$N_lbinspop<-length(data.file$lbin_vector)
#Age composition data
# if (is.null(inFile_age)){
# data.file$N_agebins<-Nages()
# data.file$agebin_vector<-1:Nages()
# data.file$ageerror<-data.frame(matrix(c(rep(-1,(Nages()+1)),rep(0.001,(Nages()+1))),2,(Nages()+1),byrow=TRUE))
# colnames(data.file$ageerror)<-paste0("age",1:Nages())
# }
#Catch units: per-fleet units from a comma-separated UI string; depletion
#fleet (last entry) stays in biomass (1).
if(input$Ct_units_choice_SSS)
{
ct.units<-as.numeric(trimws(unlist(strsplit(input$fleet_ct_units_SSS,","))))
#data.file$fleetinfo[ct.units,4]<-2 #use this when just specifying which are fleets are numbers
data.file$fleetinfo[,4]<-c(ct.units,1)
}
SS_writedat(data.file,paste0("Scenarios/",input$Scenario_name,"/sss_example.dat"),overwrite=TRUE)
####################### END DATA FILE #####################################
####################### START SSS CTL FILE #####################################
#Optional 5-platoon growth setup
if(!is.null(input$GT5)){if(input$GT5)
{
ctl.file$N_platoon<-5
ctl.file$sd_ratio<-0.7
ctl.file$submorphdist<-c(-1,0.25,0.5,0.25,0.125)
}
}
#if(all(any(input$est_parms==TRUE,input$est_parms2==FALSE),any(all(!is.null(rv.Lt$data),!is.null(rv.Ct$data)),all(!is.null(rv.Age$data),!is.null(rv.Ct$data))))==TRUE)
#{
fem_vbgf<-VBGF(input$Linf_f_mean_sss,input$k_f_mean_sss,input$t0_f_mean_sss,c(0:Nages()))
#c("lognormal","truncated normal","uniform","beta")
prior.name<-c("no prior","symmetric beta", "beta","lognormal","gamma","normal")
prior.type<-c(0:3,5,6)
#Females: columns 3:4 of MG_parms are (INIT, PRIOR); lognormal priors store
#the prior mean in log space.
#M
if(input$M_f_prior=="lognormal"){ctl.file$MG_parms[1,3:4]<-c(input$M_f_mean_sss,log(input$M_f_mean_sss))}
else {ctl.file$MG_parms[1,3:4]<-c(input$M_f_mean_sss,input$M_f_mean_sss)}
#L0: growth is referenced to age 0, so L-at-A1 parameter is set to 0
ctl.file$Growth_Age_for_L1<-input$t0_f_mean_sss
ctl.file$Growth_Age_for_L1<-0
#if(input$t0_f_prior=="lognormal"){ctl.file$MG_parms[2,3:4]<-c(fem_vbgf[1],log(fem_vbgf[1]))}
#else {ctl.file$MG_parms[2,3:4]<-fem_vbgf[1]}
if(input$t0_f_prior=="lognormal"){ctl.file$MG_parms[2,3:4]<-c(0,log(0.0000001))}
else {ctl.file$MG_parms[2,3:4]<-0}
#Linf
if(input$Linf_f_prior=="lognormal"){ctl.file$MG_parms[3,3:4]<-c(input$Linf_f_mean_sss,log(input$Linf_f_mean_sss))}
else{ctl.file$MG_parms[3,3:4]<-input$Linf_f_mean_sss}
#k
if(input$k_f_prior=="lognormal"){ctl.file$MG_parms[4,3:4]<-c(input$k_f_mean_sss,log(input$k_f_mean_sss))}
else {ctl.file$MG_parms[4,3:4]<-input$k_f_mean_sss}
#CV young
if(input$CV_lt_f_young_prior=="lognormal"){ctl.file$MG_parms[5,3:4]<-c(input$CV_lt_f_young_mean_sss,log(input$CV_lt_f_young_mean_sss))}
else{ctl.file$MG_parms[5,3:4]<-input$CV_lt_f_young_mean_sss}
#CV old
if(input$CV_lt_f_old_prior=="lognormal"){ctl.file$MG_parms[6,3:4]<-c(input$CV_lt_f_old_mean_sss,log(input$CV_lt_f_old_mean_sss))}
else{ctl.file$MG_parms[6,3:4]<-input$CV_lt_f_old_mean_sss}
#Weight-length
ctl.file$MG_parms[7,3:4]<-input$WLa_f_sss #coefficient
ctl.file$MG_parms[8,3:4]<- input$WLb_f_sss #exponent
#Maturity
ctl.file$MG_parms[9,3:4]<-input$L50_f_sss #Lmat50%
ctl.file$MG_parms[10,3:4]<- log(0.05/0.95)/(input$L95_f_sss-input$L50_f_sss) #Maturity slope
#Males: default to female values (overridden below if offset/male inputs used)
ctl.file$MG_parms[13,3:4]<-c(input$M_f_mean_sss,log(input$M_f_mean_sss)) #M
#ctl.file$MG_parms[14,3:4]<-fem_vbgf[1] #L0
ctl.file$MG_parms[14,3:4]<-0 #L0
ctl.file$MG_parms[15,3:4]<-input$Linf_f_mean_sss #Linf
ctl.file$MG_parms[16,3:4]<-input$k_f_mean_sss #k
ctl.file$MG_parms[17,3:4]<-input$CV_lt_f_young_mean_sss #CV
ctl.file$MG_parms[18,3:4]<-input$CV_lt_f_old_mean_sss #CV
#Weight-length
ctl.file$MG_parms[19,3:4]<-input$WLa_f_sss #coefficient
ctl.file$MG_parms[20,3:4]<- input$WLb_f_sss #exponent
ctl.file$MG_parms[11,3:4]<-input$Fec_a_f_sss #coefficient
ctl.file$MG_parms[12,3:4]<- input$Fec_b_f_sss #exponent
#Male parameters as offsets from females (offset approach 2): zero the
#male rows so SS treats them as exp(0) offsets.
if(input$male_offset_SSS)
{
ctl.file$parameter_offset_approach<-2 #Change to offset approach
ctl.file$MG_parms[13,c(1,3:4)]<-0 #M
ctl.file$MG_parms[14,c(1,3:4)]<-0 #L0
ctl.file$MG_parms[15,c(1,3:4)]<-0 #Linf
ctl.file$MG_parms[16,c(1,3:4)]<-0 #k
ctl.file$MG_parms[17,c(1,3:4)]<-0 #CV
ctl.file$MG_parms[18,c(1,3:4)]<-0 #CV
#Weight-length
ctl.file$MG_parms[19,c(1,3:4)]<-input$WLa_f_sss #coefficient
ctl.file$MG_parms[20,c(1,3:4)]<-input$WLb_f_sss #exponent
}
#Explicit male parameter values (separate, not offsets)
if(input$male_parms_SSS)
{
male_vbgf_sss<-VBGF(input$Linf_m_mean_sss,input$k_m_mean_sss,input$t0_m_mean_sss,c(input$t0_f_mean_sss:Nages()))
#M
if(input$M_m_prior_sss=="lognormal"){ctl.file$MG_parms[13,3:4]<-c(input$M_m_mean_sss,log(input$M_m_mean_sss))}
else {ctl.file$MG_parms[13,3:4]<-c(input$M_m_mean_sss,input$M_m_mean_sss)}
#L0
if(input$t0_f_prior_sss=="lognormal"){ctl.file$MG_parms[14,3:4]<-c(male_vbgf_sss[1],log(male_vbgf_sss[1]))}
else {ctl.file$MG_parms[14,3:4]<-c(male_vbgf_sss[1],male_vbgf_sss[1])}
# if(input$t0_f_prior_sss=="lognormal"){ctl.file$MG_parms[14,3:4]<-c(0,log(0.0000001))}
#else {ctl.file$MG_parms[14,3:4]<-c(0,0)}
#Linf
if(input$Linf_f_prior_sss=="lognormal"){ctl.file$MG_parms[15,3:4]<-c(input$Linf_m_mean_sss,log(input$Linf_m_mean_sss))}
else{ctl.file$MG_parms[15,3:4]<-c(input$Linf_m_mean_sss,input$Linf_m_mean_sss)}
#k
if(input$k_f_prior_sss=="lognormal"){ctl.file$MG_parms[16,3:4]<-c(input$k_m_mean_sss,log(input$k_m_mean_sss))}
else {ctl.file$MG_parms[16,3:4]<-c(input$k_m_mean_sss,input$k_m_mean_sss)}
#CV young
if(input$CV_lt_f_young_prior_sss=="lognormal"){ctl.file$MG_parms[17,3:4]<-c(input$CV_lt_m_young_mean_sss,log(input$CV_lt_m_young_mean_sss))}
else{ctl.file$MG_parms[17,3:4]<-c(input$CV_lt_m_young_mean_sss,input$CV_lt_m_young_mean_sss)}
#CV old
if(input$CV_lt_f_old_prior_sss=="lognormal"){ctl.file$MG_parms[18,3:4]<-c(input$CV_lt_m_old_mean_sss,log(input$CV_lt_m_old_mean_sss))}
else{ctl.file$MG_parms[18,3:4]<-c(input$CV_lt_m_old_mean_sss,input$CV_lt_m_old_mean_sss)}
#Weight-length
ctl.file$MG_parms[19,3:4]<-input$WLa_m_sss #coefficient
ctl.file$MG_parms[20,3:4]<- input$WLb_m_sss #exponent
}
#S-R
#ctl.file$SR_parms[1,3:4]<-input$lnR0 #lnR0
#BUGFIX: was log(h_mean_ss) — bare name not in scope; must reference the input
if(input$h_ss_prior=="lognormal"){ctl.file$SR_parms[2,3:4]<-c(input$h_mean_ss,log(input$h_mean_ss))}
else{ctl.file$SR_parms[2,3:4]<-input$h_mean_ss}
#}
#
ctl.file$Q_options[1]<-data.file$Nfleets
#Selectivity: convert UI Sel50/Selpeak (and dome inputs) into the six
#double-normal (pattern 24) parameters for fleet 1.
Sel50<-as.numeric(trimws(unlist(strsplit(input$Sel50_sss,","))))
Selpeak<-as.numeric(trimws(unlist(strsplit(input$Selpeak_sss,","))))
bin.width<-data.file$lbin_vector[2]-data.file$lbin_vector[1]
if(input$Sel_choice_sss=="Logistic")
{
#ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width)
ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
ctl.file$size_selex_parms[1,3:4]<- Selpeak[1]
ctl.file$size_selex_parms[2,3:4]<- 15
ctl.file$size_selex_parms[3,3:4]<- log(-((Sel50[1]-Selpeak[1])^2/log(0.5)))
ctl.file$size_selex_parms[4,3:4]<- -15
ctl.file$size_selex_parms[6,3:4]<- 15
}
if(input$Sel_choice_sss=="Dome-shaped")
{
PeakDesc<-as.numeric(trimws(unlist(strsplit(input$PeakDesc_sss,","))))
LtPeakFinal<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal_sss,","))))
FinalSel<-as.numeric(trimws(unlist(strsplit(input$FinalSel_sss,","))))
#ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width)
ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
ctl.file$size_selex_parms[1,3:4]<- Selpeak[1]
ctl.file$size_selex_parms[2,3:4]<- -log((max(data.file$lbin_vector)-Selpeak[1]-bin.width)/(PeakDesc[1]-Selpeak[1]-bin.width))
ctl.file$size_selex_parms[3,3:4]<- log(-((Sel50[1]-Selpeak[1])^2/log(0.5)))
ctl.file$size_selex_parms[4,3:4]<- log(LtPeakFinal[1])
ctl.file$size_selex_parms[6,3:4]<- -log((1/(FinalSel[1]+0.000000001)-1))
}
#Add other fleets: replicate selectivity rows (6 per fleet) and fill per-fleet
#parameter values at offsets 6*i+1 ... 6*i+6.
if((data.file$Nfleets-1)>1){
for(i in 1:(data.file$Nfleets-2))
{
#ctl.file$init_F<-rbind(ctl.file$init_F,ctl.file$init_F[1,])
ctl.file$size_selex_types<-rbind(ctl.file$size_selex_types,ctl.file$size_selex_types[1,])
ctl.file$age_selex_types<-rbind(ctl.file$age_selex_types,ctl.file$age_selex_types[1,])
ctl.file$size_selex_parms<-rbind(ctl.file$size_selex_parms,ctl.file$size_selex_parms[1:6,])
if(input$Sel_choice_sss=="Logistic")
{
#ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width)
ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
ctl.file$size_selex_parms[6*i+1,3:4]<- Selpeak[i+1]
ctl.file$size_selex_parms[6*i+2,3:4]<- 15
ctl.file$size_selex_parms[6*i+3,3:4]<- log(-((Sel50[i+1]-Selpeak[i+1])^2/log(0.5)))
ctl.file$size_selex_parms[6*i+4,3:4]<- -15
ctl.file$size_selex_parms[6*i+6,3:4]<- 15
}
if(input$Sel_choice_sss=="Dome-shaped")
{
ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
ctl.file$size_selex_parms[6*i+1,3:4]<- Selpeak[i+1]
ctl.file$size_selex_parms[6*i+2,3:4]<- -log((max(data.file$lbin_vector)-Selpeak[i+1]-bin.width)/(PeakDesc[i+1]-Selpeak[i+1]-bin.width))
ctl.file$size_selex_parms[6*i+3,3:4]<- log(-((Sel50[i+1]-Selpeak[i+1])^2/log(0.5)))
ctl.file$size_selex_parms[6*i+4,3:4]<- log(LtPeakFinal[i+1])
ctl.file$size_selex_parms[6*i+6,3:4]<- -log((1/(FinalSel[i+1]+0.000000001)-1))
}
}
ctl.file$size_selex_types[,1]<-c(rep(24,data.file$Nfleets-1),0)
ctl.file$age_selex_types[,1]<-10
#Re-label so r4ss can interpret these new entries
#rownames(ctl.file$init_F)<-paste0("InitF_seas_1_flt_",1:data.file$Nfleets,"Fishery",1:data.file$Nfleets)
rownames(ctl.file$age_selex_types)<-rownames(ctl.file$size_selex_types)<-c(paste0("Fishery",1:(data.file$Nfleets-1)),"Depl")
size_selex_parms_rownames<-list()
for(f_i in 1:(data.file$Nfleets-1))
{
size_selex_parms_rownames[[f_i]]<-c(paste0("SizeSel_P_1_Fishery",f_i,"(",f_i,")"),
paste0("SizeSel_P_2_Fishery",f_i,"(",f_i,")"),
paste0("SizeSel_P_3_Fishery",f_i,"(",f_i,")"),
paste0("SizeSel_P_4_Fishery",f_i,"(",f_i,")"),
paste0("SizeSel_P_5_Fishery",f_i,"(",f_i,")"),
paste0("SizeSel_P_6_Fishery",f_i,"(",f_i,")"))
}
size_selex_parms_rownames<-unlist(size_selex_parms_rownames)
rownames(ctl.file$size_selex_parms)<-size_selex_parms_rownames
}
SS_writectl(ctl.file,paste0("Scenarios/",input$Scenario_name,"/sss_example.ctl"),overwrite=TRUE)
#Forecast file modfications
#Reference points
#if(!input$use_forecastnew)
#{
forecast.file<-SS_readforecast(paste0("Scenarios/",input$Scenario_name,"/forecast.ss"))
if(input$RP_choices){
forecast.file$SPRtarget<-input$SPR_target
forecast.file$Btarget<-input$B_target
CR_choices<-c("1: Catch fxn of SSB, buffer on F",
"2: F fxn of SSB, buffer on F",
"3: Catch fxn of SSB, buffer on catch",
"4: F fxn of SSB, buffer on catch")
CR_choices_num.vec<-c(1:4)
forecast.file$ControlRuleMethod<-CR_choices_num.vec[CR_choices==input$CR_Ct_F]
forecast.file$SBforconstantF<-input$slope_hi
forecast.file$BfornoF<-input$slope_low
}
#Forecast years and catch buffer: scalar buffer applies to all years;
#vector buffer becomes a year-specific Flimitfraction_m table (flag -1).
if(input$Forecast_choice)
{
forecast.file$Nforecastyrs<-input$forecast_num
buffer.in<-as.numeric(trimws(unlist(strsplit(input$forecast_buffer,","))))
if(length(buffer.in)==1){forecast.file$Flimitfraction<-buffer.in}
if(length(buffer.in)>1)
{
forecast.file$Flimitfraction<--1
buffer.datafr<-data.frame(Year=c((data.file$endyr+1):(data.file$endyr+input$forecast_num)),Fraction=buffer.in)
rownames(buffer.datafr)<-paste0("#_Flimitfraction_m",1:input$forecast_num)
forecast.file$Flimitfraction_m<-buffer.datafr
}
}
SS_writeforecast(forecast.file,paste0("Scenarios/",input$Scenario_name),overwrite=TRUE)
#}
#if(input$use_forecastnew)
# {
# forecast.file<-SS_readforecast(paste0("Scenarios/",input$Scenario_name,"/forecast.ss_new"))
# SS_writeforecast(forecast.file,paste0("Scenarios/",input$Scenario_name),overwrite=TRUE)
# }
#Set prior inputs
#0 = normal
#10 = truncated normal
#1 = symmetric beta (rbeta)
#2 = beta
#3 = lognormal
#30 = truncated lognormal
#4 = uniform
#99 = used only for the steepness parameter. Indicates h will come from FMSY/M prior
sss.prior.name<-c("no prior","symmetric beta","beta","normal","truncated normal","lognormal","truncated lognormal","uniform")
sss.prior.type<-c(-1,1,2,0,10,3,30,4)
#Each *.in_sss vector is (type, mean, SD) for females then (type, mean, SD)
#for males.
Dep.in_sss<-c(sss.prior.type[sss.prior.name==input$Depl_prior_sss],input$Depl_mean_sss,input$Depl_SD_sss)
h.in_sss<-c(sss.prior.type[sss.prior.name==input$h_prior_sss],input$h_mean_sss,input$h_SD_sss)
if(!input$male_offset_SSS)
{
M.in_sss<-c(sss.prior.type[sss.prior.name==input$M_prior_sss],input$M_f_mean_sss,input$M_f_SD_sss,sss.prior.type[sss.prior.name==input$M_prior_sss],input$M_f_mean_sss,input$M_f_SD_sss)
Linf.in_sss<-c(sss.prior.type[sss.prior.name==input$Linf_f_prior_sss],input$Linf_f_mean_sss,input$Linf_f_SD_sss,sss.prior.type[sss.prior.name==input$Linf_f_prior_sss],input$Linf_f_mean_sss,input$Linf_f_SD_sss)
k.in_sss<-c(sss.prior.type[sss.prior.name==input$k_f_prior_sss],input$k_f_mean_sss,input$k_f_SD_sss,sss.prior.type[sss.prior.name==input$k_f_prior_sss],input$k_f_mean_sss,input$k_f_SD_sss)
t0.in_sss<-c(sss.prior.type[sss.prior.name==input$t0_f_prior_sss],input$t0_f_mean_sss,input$t0_f_SD_sss,sss.prior.type[sss.prior.name==input$t0_f_prior_sss],input$t0_f_mean_sss,input$t0_f_SD_sss)
}
if(input$male_offset_SSS)
{
M.in_sss<-c(sss.prior.type[sss.prior.name==input$M_prior_sss],input$M_f_mean_sss,input$M_f_SD_sss,sss.prior.type[sss.prior.name==input$M_prior_sss],0,0)
Linf.in_sss<-c(sss.prior.type[sss.prior.name==input$Linf_f_prior_sss],input$Linf_f_mean_sss,input$Linf_f_SD_sss,sss.prior.type[sss.prior.name==input$Linf_f_prior_sss],0,0)
k.in_sss<-c(sss.prior.type[sss.prior.name==input$k_f_prior_sss],input$k_f_mean_sss,input$k_f_SD_sss,sss.prior.type[sss.prior.name==input$k_f_prior_sss],0,0)
t0.in_sss<-c(sss.prior.type[sss.prior.name==input$t0_f_prior_sss],input$t0_f_mean_sss,input$t0_f_SD_sss,sss.prior.type[sss.prior.name==input$t0_f_prior_sss],0,0)
}
if(input$male_parms_SSS)
{
M.in_sss<-c(sss.prior.type[sss.prior.name==input$M_prior_sss],input$M_f_mean_sss,input$M_f_SD_sss,sss.prior.type[sss.prior.name==input$M_m_prior_sss],input$M_m_mean_sss,input$M_m_SD_sss)
#BUGFIX: male Linf SD slot previously used input$Linf_f_SD_sss (female SD);
#corrected to input$Linf_m_SD_sss to match the M/k/t0 male-input pattern.
Linf.in_sss<-c(sss.prior.type[sss.prior.name==input$Linf_f_prior_sss],input$Linf_f_mean_sss,input$Linf_f_SD_sss,sss.prior.type[sss.prior.name==input$Linf_m_prior_sss],input$Linf_m_mean_sss,input$Linf_m_SD_sss)
k.in_sss<-c(sss.prior.type[sss.prior.name==input$k_f_prior_sss],input$k_f_mean_sss,input$k_f_SD_sss,sss.prior.type[sss.prior.name==input$k_m_prior_sss],input$k_m_mean_sss,input$k_m_SD_sss)
t0.in_sss<-c(sss.prior.type[sss.prior.name==input$t0_f_prior_sss],input$t0_f_mean_sss,input$t0_f_SD_sss,sss.prior.type[sss.prior.name==input$t0_m_prior_sss],input$t0_m_mean_sss,input$t0_m_SD_sss)
}
show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[2],text="Model run in progress")
#Run SSS
SSS.out<-SSS(paste0("Scenarios/",input$Scenario_name),
file.name=c("sss_example.dat","sss_example.ctl"),
reps=input$SSS_reps,
seed.in=input$SSS_seed,
Dep.in=Dep.in_sss,
M.in=M.in_sss,
SR_type=3,
h.in=h.in_sss,
FMSY_M.in=c(-1,0.5,0.1),
BMSY_B0.in=c(-1,0.5,0.1),
Linf.k.cor=input$Linf_k_cor_sss,
Linf.in=Linf.in_sss,
k.in=k.in_sss,
t0.in=t0.in_sss,
Zfrac.Beta.in=c(-99,0.2,0.6,-99,0.5,2),
R_start=c(0,input$lnR0_sss),
doR0.loop=c(1,round(input$lnR0_sss*0.5),round(input$lnR0_sss*1.5),(round(input$lnR0_sss*1.3)-round(input$lnR0_sss*0.5))/10),
sum_age=0,
ts_yrs=c(input$styr,input$endyr),
pop.ltbins=NA,
#ofl_yrs=c(input$endyr+1,input$endyr+2),
sexes=TRUE,
BH_FMSY_comp=FALSE,
OStype=input$OS_choice)
#save(SSS.out)
show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[3],text="Process model output")
#Render output plots only if the run produced SSS_out.DMP. Note load()
#returns the name(s) of the loaded object(s), so exists(load(...)) checks
#the run output object is present.
if(exists(load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))))
{
#Prior vs posterior: female/male M, steepness, depletion
output$SSS_priors_post<-renderPlot({
if(exists(load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))))
{
load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))
sss.M.f<-rbind(data.frame(value=SSS.out$Prior$M_f,type="prior",metric="Female M"),data.frame(value=SSS.out$Post$M_f,type="post",metric="Female M"))
sss.M.m<-rbind(data.frame(value=SSS.out$Prior$M_m,type="prior",metric="Male M"),data.frame(value=SSS.out$Post$M_m,type="post",metric="Male M"))
sss.h<-rbind(data.frame(value=SSS.out$Prior$h,type="prior",metric="h"),data.frame(value=SSS.out$Post$h,type="post",metric="h"))
sss.Dep<-rbind(data.frame(value=SSS.out$Prior$Dep,type="prior",metric="Dep"),data.frame(value=SSS.out$Post$Dep.Obs,type="post",metric="Dep"))
sss.vals.out<-rbind(sss.M.f,sss.M.m,sss.h,sss.Dep)
ggplot(sss.vals.out,aes(x=value,color=type,fill=type))+
geom_histogram(position="dodge",alpha=0.5)+
theme(legend.position="bottom")+
theme(legend.title=element_blank())+
facet_grid(~metric,scales = "free")
# Mf.plot<-ggplot(sss.M.f,aes(x=value,color=type))+geom_histogram(position="dodge",alpha=0.5,fill="white")
# Mm.plot<-ggplot(sss.M.m,aes(x=value,color=type))+geom_histogram(position="dodge",alpha=0.5,fill="white")
# h.plot<-ggplot(sss.h,aes(x=value,color=type))+geom_histogram(position="dodge",alpha=0.5,fill="white")
# Dep.plot<-ggplot(sss.Dep,aes(x=value,color=type))+geom_histogram(position="dodge",alpha=0.5,fill="white")
}
else{return(NULL)}
})
#Prior vs posterior: growth parameters by sex
output$SSS_growth_priors_post<-renderPlot({
if(exists(load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))))
{
load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))
sss.L1_f<-rbind(data.frame(value=SSS.out$Prior$L1_f,type="prior",metric="Female L1"),data.frame(value=SSS.out$Post$L1_f,type="post",metric="Female L1"))
sss.Linf_f<-rbind(data.frame(value=SSS.out$Prior$Linf_f,type="prior",metric="Female Linf"),data.frame(value=SSS.out$Post$Linf_f,type="post",metric="Female Linf"))
sss.k_f<-rbind(data.frame(value=SSS.out$Prior$k_f,type="prior",metric="Female k"),data.frame(value=SSS.out$Post$k_f,type="post",metric="Female k"))
sss.L1_m<-rbind(data.frame(value=SSS.out$Prior$L1_m,type="prior",metric="Male L1"),data.frame(value=SSS.out$Post$L1_m,type="post",metric="Male L1"))
sss.Linf_m<-rbind(data.frame(value=SSS.out$Prior$Linf_m,type="prior",metric="Male Linf"),data.frame(value=SSS.out$Post$Linf_m,type="post",metric="Male Linf"))
sss.k_m<-rbind(data.frame(value=SSS.out$Prior$k_m,type="prior",metric="Male k"),data.frame(value=SSS.out$Post$k_m,type="post",metric="Male k"))
sss.vals.growth.out<-rbind(sss.L1_f,sss.Linf_f,sss.k_f,sss.L1_m,sss.Linf_m,sss.k_m)
ggplot(sss.vals.growth.out,aes(x=value,color=type,fill=type))+
geom_histogram(position="dodge",alpha=0.5)+
theme(legend.position="bottom")+
theme(legend.title=element_blank())+
facet_wrap(~metric,scales = "free")
}
else{return(NULL)}
})
#OFL boxplots by forecast year
output$SSS_OFL_plot<-renderPlot({
if(exists(load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))))
{
load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))
ofl.years<-as.numeric(unique(melt(SSS.out$OFL)$Var2))
ggplot(melt(SSS.out$OFL),aes(Var2,value,group=Var2))+
geom_boxplot(fill="#236192")+
scale_x_continuous(breaks=ofl.years,labels=as.character(ofl.years))+
ylab("OFL (mt)")+
xlab("Year")
}
else{return(NULL)}
})
#ABC boxplots by forecast year
output$SSS_ABC_plot<-renderPlot({
if(exists(load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))))
{
load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))
abc.years<-as.numeric(unique(melt(SSS.out$ABC)$Var2))
ggplot(melt(SSS.out$ABC),aes(Var2,value,group=Var2))+
geom_boxplot(fill="#658D1B")+
scale_x_continuous(breaks=abc.years,labels=as.character(abc.years))+
ylab("ABC (mt)")+
xlab("Year")
}
else{return(NULL)}
})
}
remove_modal_spinner()
})
###############
### END SSS ###
###############
##################################################################
### PREPARE FILES and RUN Length and Age-based Stock Synthesis ###
##################################################################
SS.file.update<-observeEvent(input$run_SS,{
# if(is.null(inFile) | !anyNA(inp$
# styr,ndyr,
# input$Nages,
# input$M_f,
# input$k_f,
# input$Linf_f,
# input$t0_f,
# input$L50_f,
# input$L95_f,
# input$M_m,
# input$k_m,
# input$Linf_m,
# input$t0_m,
# input$L50_m,
# input$L95_m,
# ))
# {
updateTabsetPanel(session, "tabs",
selected = '1')
# progress <- shiny::Progress$new(session, min=1, max=2)
# on.exit(progress$close())
# progress$set(message = 'Model run in progress',
# detail = '')
# for (i in 1:2) {
# progress$set(value = i)
# Sys.sleep(0.5)
# }
if(!any(input$use_par,input$use_datanew,input$use_controlnew,input$user_model))
#if(which(c(input$use_par,input$use_datanew,input$use_datanew_user,input$use_controlnew,input$use_controlnew_user,input$user_model))!=0)
{
#Copy and move files
if(file.exists(paste0("Scenarios/",input$Scenario_name)))
{
unlink(paste0("Scenarios/",input$Scenario_name),recursive=TRUE) #Deletes previous run
# file.remove(paste0(getwd(),"/Scenarios/",input$Scenario_name))
}
if(input$Ct_F_LO_select=="Estimate F" & is.null(rv.Ct$data)){
file.copy(paste0("SS_LO_F_files"),paste0("Scenarios"),recursive=TRUE,overwrite=TRUE)
file.rename(paste0("Scenarios/SS_LO_F_files"), paste0("Scenarios/",input$Scenario_name))
}
else{
file.copy(paste0("SS_LB_files"),paste0("Scenarios"),recursive=TRUE,overwrite=TRUE)
file.rename(paste0("Scenarios/SS_LB_files"), paste0("Scenarios/",input$Scenario_name))
}
}
# if(!input$use_customfile)
# {
# }
#Read data and control files
if(!input$user_model)
{
data.file<-SS_readdat(paste0("Scenarios/",input$Scenario_name,"/datafile.dat"))
ctl.file<-SS_readctl(paste0("Scenarios/",input$Scenario_name,"/controlfile.ctl"),use_datlist = TRUE, datlist=data.file)
}
if(input$use_datanew)
{
data.file<-SS_readdat(paste0("Scenarios/",input$Scenario_name,"/data_echo.ss_new"))
}
if(input$use_controlnew)
{
data.file<-SS_readdat(paste0("Scenarios/",input$Scenario_name,"/data_echo.ss_new"))
ctl.file<-SS_readctl(paste0("Scenarios/",input$Scenario_name,"/control.ss_new"),use_datlist = TRUE, datlist=data.file)
}
# data.file<-SS_readdat(paste0(getwd(),"/Scenarios/",input$Scenario_name,"/SS_LB.dat"))
# ctl.file<-SS_readctl(paste0(getwd(),"/Scenarios/",input$Scenario_name,"/SS_LB.ctl"),use_datlist = TRUE, datlist=data.file)
#if(input$Ct_F_LO_select=="Estimate F" & is.null(rv.Ct$data))
# {
# data.file<-SS_readdat(paste0(getwd(),"/Scenarios/",input$Scenario_name,"/SS_LB.dat"))
# ctl.file<-SS_readctl(paste0(getwd(),"/Scenarios/",input$Scenario_name,"/SS_LB.ctl"),use_datlist = TRUE, datlist=data.file)
# }
if(!input$user_model)
{
#Prepare inputs to evaluate any errors
Sel50<-as.numeric(trimws(unlist(strsplit(input$Sel50,","))))
Sel50_phase<-as.numeric(trimws(unlist(strsplit(input$Sel50_phase,","))))
Selpeak<-as.numeric(trimws(unlist(strsplit(input$Selpeak,","))))
Selpeak_phase<-as.numeric(trimws(unlist(strsplit(input$Selpeak_phase,","))))
bin.width<-data.file$lbin_vector[2]-data.file$lbin_vector[1]
minmaxbin<-min(Selpeak[1]-min(data.file$lbin_vector),max(data.file$lbin_vector)-Selpeak[1])
#sel.inputs.comps<-length(Sel50)-length(Sel50_phase)-length(Selpeak)-length(Selpeak_phase)
sel.inputs.lts<-c(length(Sel50),length(Sel50_phase),length(Selpeak),length(Selpeak_phase))
Nfleets<-max(ncol(rv.Ct$data)-1,rv.Lt$data[,3],rv.Age$data[,3],rv.Index$data[,3])
if(input$Sel_choice=="Dome-shaped")
{
PeakDesc<-as.numeric(trimws(unlist(strsplit(input$PeakDesc,","))))
PeakDesc_phase<-as.numeric(trimws(unlist(strsplit(input$PeakDesc_phase,","))))
LtPeakFinal<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal,","))))
LtPeakFinal_phase<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal_phase,","))))
FinalSel<-as.numeric(trimws(unlist(strsplit(input$FinalSel,","))))
FinalSel_phase<-as.numeric(trimws(unlist(strsplit(input$FinalSel_phase,","))))
minmaxbin<-min(Selpeak[1]-min(data.file$lbin_vector),max(data.file$lbin_vector)-Selpeak[1])
sel.inputs.lts<-c(length(Sel50),length(Sel50_phase),length(Selpeak),length(Selpeak_phase),length(PeakDesc),length(PeakDesc_phase),length(LtPeakFinal),length(LtPeakFinal_phase),length(FinalSel),length(FinalSel_phase))
}
#Search for errors in inputs
#Throw warning if not enough selectivity inputs
if(!all(Nfleets==sel.inputs.lts))
{
#Throw warning if not enough selectivity inputs
sendSweetAlert(
session = session,
title = "Selectivity input warning",
text = "Please check to see if you have provided filled in the inputs correctly. Especially check selectivity for missing fleets (both in parameter and phases). Total fleets includes fishing fleets and surveys.",
type = "error")
remove_modal_spinner()
}
if(all(Nfleets==sel.inputs.lts))
{
checkmod<-1 #add object to verify no errors in inputs and model can be run
show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[2],text="Model run in progress")
if(!input$use_par)
{
if(all(!input$use_datanew,!input$user_model))
{
#Read, edit then write new DATA file
data.file$styr<-input$styr
data.file$endyr<-input$endyr
data.file$Nages<-Nages()
if(!is.null(rv.Ct$data)){catch.fleets<-max(ncol(rv.Ct$data)-1)}
if(all(!is.null(rv.Lt$data),is.null(rv.Ct$data))){catch.fleets<-max(rv.Lt$data[,3])}
data.file$Nfleets<-max(catch.fleets,rv.Lt$data[,3],rv.Age$data[,3],rv.Index$data[,3])
#########
#Catches#
#########
if (is.null(rv.Ct$data))
{
#inFile<- rv.Lt$data
Lt.comp.data<-rv.Lt$data
Age.comp.data<- rv.Age$data
#data.file$Nfleets<-max(Lt.comp.data[,2],Age.comp.data[,2])
if(input$Ct_F_LO_select=="Estimate F"){data.file$bycatch_fleet_info[4:5]<-c(input$styr,input$endyr)}
if(data.file$Nfleets>1){
for(i in 1:(data.file$Nfleets-1))
{
if(input$Ct_F_LO_select=="Estimate F"){data.file$bycatch_fleet_info<-rbind(data.file$bycatch_fleet_info,data.file$bycatch_fleet_info[1,])}
}
if(input$Ct_F_LO_select=="Estimate F"){data.file$bycatch_fleet_info[,1]<-c(1:data.file$Nfleets)}
}
year.in<-input$styr:input$endyr
catch.cols<-colnames(data.file$catch)
catch_temp<-list()
if(catch.fleets==1){catch.level<-1000}
if(catch.fleets>1){
catch.level<-as.numeric(trimws(unlist(strsplit(input$Wt_fleet_Ct,","))))
catch.level<-catch.level/sum(catch.level)*1000
}
for(i in 1:catch.fleets)
{
catch_temp[[i]]<-data.frame(
c(-999,year.in),
rep(1,length(year.in)+1),
rep(i,length(year.in)+1),
c(catch.level[i],rep(catch.level[i],length(year.in))),
c(0.01,rep(1000,length(year.in)))
)
}
data.file$catch<-list.rbind(catch_temp)
colnames(data.file$catch)<-catch.cols
}
if(!is.null(rv.Ct$data))
{
Catch.data<-rv.Ct$data
#data.file$Nfleets<-max(ncol(Catch.data)-1,data.file$Nfleets)
year.in<-Catch.data[,1]
catch.cols<-colnames(data.file$catch)
catch_temp<-list()
for(i in 1:catch.fleets)
{
catch_temp[[i]]<-data.frame(
c(-999,year.in),
rep(1,length(year.in)+1),
rep(i,length(year.in)+1),
c(0.00000000000000000001,Catch.data[,i+1]),
rep(0.01,length(year.in)+1)
)
}
data.file$catch<-list.rbind(catch_temp)
colnames(data.file$catch)<-catch.cols
}
#Index data
if (!is.null(rv.Index$data)) {
Index.data<-rv.Index$data
data.file$N_cpue<-unique(rv.Index$data[,3])
data.file$CPUE<-data.frame(year=rv.Index$data[,1],seas=rv.Index$data[,2],index=rv.Index$data[,3],obs=rv.Index$data[,4],se_log=rv.Index$data[,5])
}
#########################
#Length composition data#
#########################
#Population length data bins
data.file$binwidth<-2
if(!is.null(rv.Lt$data)){data.file$binwidth<-as.numeric(colnames(rv.Lt$data)[7])-as.numeric(colnames(rv.Lt$data)[6])}
data.file$minimum_size<-2
if(!is.null(rv.Lt$data)){data.file$minimum_size<-as.numeric(colnames(rv.Lt$data)[6])}
max.bin.in<-2*(round((Linf()+(Linf()*0.25))/2))+2 #0.2326
data.file$maximum_size<-max.bin.in
# if(input$advance_ss_click)
# {
data.file$binwidth<-input$lt_bin_size
data.file$minimum_size<-input$lt_min_bin
data.file$maximum_size<-input$lt_max_bin
# }
#inFile<- rv.Lt$data
if (is.null(rv.Lt$data)) {
if(input$est_parms==FALSE){Linf_bins<-input$Linf_f_fix}
if(input$est_parms==TRUE){Linf_bins<-input$Linf_f_mean}
data.file$binwidth<-2
data.file$minimum_size<-2
max.bin.in<-2*(round((Linf()+(Linf()*0.25))/2))+2 #0.2326
data.file$maximum_size<-max.bin.in
data.file$lbin_vector<-seq(data.file$minimum_size,data.file$maximum_size,data.file$binwidth)
data.file$N_lbins<-length(data.file$lbin_vector)
data.file$lencomp<-NULL
}
if (!is.null(rv.Lt$data)) {
Lt.comp.data<-rv.Lt$data
data.file$N_lbins<-ncol(Lt.comp.data)-5
data.file$lbin_vector<-as.numeric(colnames(rv.Lt$data)[6:ncol(rv.Lt$data)]) #as.numeric(colnames(Lt.comp.data[,5:ncol(Lt.comp.data)]))
if(data.file$maximum_size<max(data.file$lbin_vector)){data.file$maximum_size<-(2*round(max(data.file$lbin_vector)/2))+2}
lt.data.names<-c(colnames(data.file$lencomp[,1:6]),paste0("f",data.file$lbin_vector),paste0("m",data.file$lbin_vector))
lt.data.females<-lt.data.males<-lt.data.unknowns<-lt.data.sex3<-data.frame(matrix(rep(NA,length(lt.data.names)),nrow=1))
colnames(Lt.comp.data)[1:5]<-c("Year","Month","Fleet","Sex","Nsamps")
#female lengths
if(nrow(subset(Lt.comp.data,Sex==1))>0){
Lt.comp.data_female<-subset(Lt.comp.data,Sex==1 & Nsamps>0)
samp.yrs<-Lt.comp.data_female[,1]
lt.data.females<-data.frame(cbind(samp.yrs,
Lt.comp.data_female[,2],
Lt.comp.data_female[,3],
Lt.comp.data_female[,4],
rep(0,length(samp.yrs)),
Lt.comp.data_female[,5],
Lt.comp.data_female[,6:ncol(Lt.comp.data_female)],
Lt.comp.data_female[,6:ncol(Lt.comp.data_female)]*0)
)
}
#male lengths
if(nrow(subset(Lt.comp.data,Sex==2))>0){
Lt.comp.data_male<-subset(Lt.comp.data,Sex==2 & Nsamps>0)
samp.yrs_males<-Lt.comp.data_male[,1]
lt.data.males<-data.frame(cbind(samp.yrs_males,
Lt.comp.data_male[,2],
Lt.comp.data_male[,3],
Lt.comp.data_male[,4],
rep(0,length(samp.yrs_males)),
Lt.comp.data_male[,5],
Lt.comp.data_male[,6:ncol(Lt.comp.data_male)]*0,
Lt.comp.data_male[,6:ncol(Lt.comp.data_male)])
)
}
#unknown sex lengths
if(nrow(subset(Lt.comp.data,Sex==0))>0){
Lt.comp.data_unknown<-subset(Lt.comp.data,Sex==0 & Nsamps>0)
samp.yrs_unknown<-Lt.comp.data_unknown[,1]
lt.data.unknowns<-data.frame(cbind(samp.yrs_unknown,
Lt.comp.data_unknown[,2],
Lt.comp.data_unknown[,3],
Lt.comp.data_unknown[,4],
rep(0,length(samp.yrs_unknown)),
Lt.comp.data_unknown[,5],
Lt.comp.data_unknown[,6:ncol(Lt.comp.data_unknown)],
Lt.comp.data_unknown[,6:ncol(Lt.comp.data_unknown)]*0)
)
}
#Maintain sample sex ratio: pool female and male observations that share a
#year/fleet combination into a single two-sex (Sex=3) composition row; rows with
#no opposite-sex match remain as single-sex rows.
#Guard added: pooling is only possible when both sexes are present in the data
#(the original would error on undefined objects if one sex had no rows).
if(input$Sex3 && nrow(subset(Lt.comp.data,Sex==1))>0 && nrow(subset(Lt.comp.data,Sex==2))>0){
yrsfleet_females<-paste0(Lt.comp.data_female[,1],Lt.comp.data_female[,3])
yrsfleet_males<-paste0(Lt.comp.data_male[,1],Lt.comp.data_male[,3])
#Flag rows whose year/fleet combination also occurs for the other sex
sex3_match_female<-yrsfleet_females%in%yrsfleet_males
sex3_match_male<-yrsfleet_males%in%yrsfleet_females
#Subset to the matched year/fleet rows
#NOTE(review): assumes one row per year/fleet per sex so the two subsets align
#row-by-row — confirm upstream aggregation guarantees this
Lt.comp.data_female_sex3<-Lt.comp.data_female[sex3_match_female,]
Lt.comp.data_male_sex3<-Lt.comp.data_male[sex3_match_male,]
lt.data.sex3<-data.frame(cbind(Lt.comp.data_female_sex3[,1], #Year
Lt.comp.data_female_sex3[,2], #Month
Lt.comp.data_female_sex3[,3], #Fleet
rep(3,nrow(Lt.comp.data_female_sex3)), #Sex = 3 (both sexes in one row)
rep(0,nrow(Lt.comp.data_female_sex3)), #Partition = 0
#BUG FIX: combined sample size is female Nsamps + male Nsamps (column 5 of
#both); the original added male column 4 (the Sex code, always 2) instead
Lt.comp.data_female_sex3[,5]+Lt.comp.data_male_sex3[,5],
Lt.comp.data_female_sex3[,6:ncol(Lt.comp.data_female_sex3)],
Lt.comp.data_male_sex3[,6:ncol(Lt.comp.data_male_sex3)])
)
#Drop the pooled rows from the single-sex tables
lt.data.females<-lt.data.females[!sex3_match_female,]
lt.data.males<-lt.data.males[!sex3_match_male,]
}
#Assemble the final SS lencomp table; na.omit() drops the all-NA placeholder
#rows for any sex category that received no data
colnames(lt.data.females)<-colnames(lt.data.males)<-colnames(lt.data.unknowns)<-colnames(lt.data.sex3)<-lt.data.names
data.file$lencomp<-na.omit(rbind(lt.data.unknowns,lt.data.females,lt.data.males,lt.data.sex3))
}
#}
#else{
# data.file$lencomp<-data.frame(matrix(cbind(samp.yrs,
# rep(1,length(samp.yrs)),
# rep(1,length(samp.yrs)),
# rep(1,length(samp.yrs)),
# rep(0,length(samp.yrs)),
# colSums(Lt.comp.data[-1]),
# t(Lt.comp.data)[-1,],
# t(Lt.comp.data)[-1,]*0),
# nrow=length(samp.yrs),
# ncol=6+length(Lt.comp.data[,1])*2,
# byrow=FALSE))[,,drop=FALSE]
# }
# colnames(data.file$lencomp)<-lt.data.names
######################
#Age composition data#
######################
Age.comp.data<-rv.Age$data
if (is.null(Age.comp.data))
{
#No age data uploaded: default to one age bin per age (0..Nages()-1) and a
#placeholder ageing-error definition (row 1 = -1 bias, row 2 = 0.001 SD)
data.file$N_agebins<-Nages()
data.file$agebin_vector<-seq(0,Nages()-1)
data.file$ageerror<-data.frame(rbind(rep(-1,Nages()+1),rep(0.001,Nages()+1)))
colnames(data.file$ageerror)<-paste0("age",0:Nages())
}
if (!is.null(Age.comp.data))
{
#Age data supplied: 8 meta columns; age-bin labels come from the column names
#from column 9 onward
data.file$N_agebins<-ncol(Age.comp.data)-8
data.file$agebin_vector<-as.numeric(colnames(Age.comp.data[,9:ncol(Age.comp.data)]))
data.file$ageerror<-data.frame(matrix(c(rep(-1,(Nages()+1)),rep(0.001,(Nages()+1))),2,(Nages()+1),byrow=TRUE))
if(!is.null(input$Ageing_error_choice)){
if(input$Ageing_error_choice)
{
#User-supplied ageing-error matrix; rows come in bias/SD pairs, hence nrow/2 definitions
data.file$ageerror<-data.frame((rv.AgeErr$data))
data.file$N_ageerror_definitions<-nrow(rv.AgeErr$data)/2
}
}
#Label object for r4ss
colnames(data.file$ageerror)<-paste0("age",0:Nages())
rownames(data.file$ageerror)<-c(1:nrow(data.file$ageerror))
# data.file$ageerror<-data.frame(matrix(c(rep(-1,(Nages()+1)),rep(0.001,(Nages()+1))),2,(Nages()+1),byrow=TRUE))
# colnames(data.file$ageerror)<-paste0("age",1:Nages())
#SS agecomp header (9 meta columns) followed by female then male age-bin columns
age.data.names<-c(c("Yr","Month","Fleet","Sex","Part","Ageerr","Lbin_lo","Lbin_hi","Nsamp"),paste0("f",data.file$agebin_vector),paste0("m",data.file$agebin_vector))
#All-NA placeholder rows; na.omit() at the end drops categories with no data
age.data.females<-age.data.males<-age.data.unknowns<-data.frame(matrix(rep(NA,length(age.data.names)),nrow=1))
colnames(Age.comp.data)[1:8]<-c("Year","Month","Fleet","Sex","AgeErr","Lbin_low","Lbin_hi","Nsamps")
#female ages
#Female rows (Sex==1): comps go in the female (f*) columns; male columns zeroed.
#A Partition=0 column is inserted after the Sex column.
if(nrow(subset(Age.comp.data,Sex==1))>0){
Age.comp.data_female<-subset(Age.comp.data,Sex==1 & Nsamps>0)
samp.yrs_females<-Age.comp.data_female[,1]
age.data.females<-data.frame(cbind(samp.yrs_females,
Age.comp.data_female[,2],
Age.comp.data_female[,3],
Age.comp.data_female[,4],
rep(0,length(samp.yrs_females)), #Partition = 0
Age.comp.data_female[,5],
Age.comp.data_female[,6],
Age.comp.data_female[,7],
Age.comp.data_female[,8],
Age.comp.data_female[,9:ncol(Age.comp.data_female)],
Age.comp.data_female[,9:ncol(Age.comp.data_female)]*0)
)
}
#male ages
#Male rows (Sex==2): female (f*) columns zeroed; comps go in the male (m*) columns
if(nrow(subset(Age.comp.data,Sex==2))>0){
Age.comp.data_male<-subset(Age.comp.data,Sex==2 & Nsamps>0)
samp.yrs_males<-Age.comp.data_male[,1]
age.data.males<-data.frame(cbind(samp.yrs_males,
Age.comp.data_male[,2],
Age.comp.data_male[,3],
Age.comp.data_male[,4],
rep(0,length(samp.yrs_males)), #Partition = 0
Age.comp.data_male[,5],
Age.comp.data_male[,6],
Age.comp.data_male[,7],
Age.comp.data_male[,8],
Age.comp.data_male[,9:ncol(Age.comp.data_male)]*0,
Age.comp.data_male[,9:ncol(Age.comp.data_male)])
)
}
#unknown sex ages
#Unknown-sex rows (Sex==0): comps placed in the female-column slot; male columns zeroed
if(nrow(subset(Age.comp.data,Sex==0))>0){
Age.comp.data_unknown<-subset(Age.comp.data,Sex==0 & Nsamps>0)
samp.yrs_unknown<-Age.comp.data_unknown[,1]
age.data.unknowns<-data.frame(cbind(samp.yrs_unknown,
Age.comp.data_unknown[,2],
Age.comp.data_unknown[,3],
Age.comp.data_unknown[,4],
rep(0,length(samp.yrs_unknown)), #Partition = 0
Age.comp.data_unknown[,5],
Age.comp.data_unknown[,6],
Age.comp.data_unknown[,7],
Age.comp.data_unknown[,8],
Age.comp.data_unknown[,9:ncol(Age.comp.data_unknown)],
Age.comp.data_unknown[,9:ncol(Age.comp.data_unknown)]*0)
)
}
#if(nrow(subset(Age.comp.data,Sex==0))>0){age.data.unknowns<-data.frame(cbind(
# age.data.unknowns,
# Age.comp.data[1,7:ncol(Age.comp.data_unknown)],
# Age.comp.data[1,7:ncol(Age.comp.data_unknown)]*0))
# }
#Assemble the final SS agecomp table; na.omit() drops the all-NA placeholder rows
colnames(age.data.females)<-colnames(age.data.males)<-colnames(age.data.unknowns)<-age.data.names
data.file$agecomp<-na.omit(rbind(age.data.females,age.data.males,age.data.unknowns))
}
# inFile_age<- rv.Age$data
# if (is.null(inFile_age)){
# data.file$N_agebins<-Nages()
# data.file$agebin_vector<-1:Nages()
# data.file$ageerror<-data.frame(matrix(c(rep(-1,(Nages()+1)),rep(0.001,(Nages()+1))),2,(Nages()+1),byrow=TRUE))
# colnames(data.file$ageerror)<-paste0("age",1:Nages())
# }
# if (!is.null(inFile_age)){
# Age.comp.data<-rv.Age$data
# age.classes<-nrow(Age.comp.data)
# data.file$N_agebins<-age.classes
# data.file$agebin_vector<-Age.comp.data[,1]
# data.file$ageerror<-data.frame(matrix(c(rep(-1,(age.classes+1)),rep(0.001,(age.classes+1))),2,(age.classes+1),byrow=TRUE))
# colnames(data.file$ageerror)<-paste0("age",1:Nages())
# age.samp.yrs<-as.numeric(colnames(Age.comp.data)[-1])
# age.data.names<-c(c("Yr","Seas","FltSvy","Gender","Part","Ageerr","Lbin_lo","Lbin_hi","Nsamp"),paste0("f",Age.comp.data[,1]),paste0("m",Age.comp.data[,1]))
# if(length(age.samp.yrs)==1){
# data.file$agecomp<-data.frame(matrix(c(samp.yrs,
# rep(1,length(age.samp.yrs)),
# rep(1,length(age.samp.yrs)),
# rep(1,length(age.samp.yrs)),
# rep(0,length(age.samp.yrs)),
# rep(-1,length(age.samp.yrs)),
# rep(-1,length(age.samp.yrs)),
# rep(-1,length(age.samp.yrs)),
# colSums(Age.comp.data[-1]),
# t(Age.comp.data)[-1,],
# t(Age.comp.data)[-1,]*0),
# nrow=length(age.samp.yrs),
# ncol=9+length(Age.comp.data[,1])*2,
# byrow=FALSE))[,,drop=FALSE]
# }
# else{
# data.file$agecomp<-data.frame(matrix(cbind(samp.yrs,
# rep(1,length(age.samp.yrs)),
# rep(1,length(age.samp.yrs)),
# rep(1,length(age.samp.yrs)),
# rep(0,length(age.samp.yrs)),
# rep(-1,length(age.samp.yrs)),
# rep(-1,length(age.samp.yrs)),
# rep(-1,length(age.samp.yrs)),
# colSums(Age.comp.data[-1]),
# t(Age.comp.data)[-1,],
# t(Age.comp.data)[-1,]*0),
# nrow=length(age.samp.yrs),
# ncol=9+length(Age.comp.data[,1])*2,
# byrow=FALSE))[,,drop=FALSE]
# }
# colnames(data.file$agecomp)<-age.data.names
# }
#Create data info
#Replicate the single template row of each info table so there is one row per fleet
if(data.file$Nfleets>1){
for(i in 1:(data.file$Nfleets-1))
{
data.file$fleetinfo<-rbind(data.file$fleetinfo,data.file$fleetinfo[1,])
data.file$CPUEinfo<-rbind(data.file$CPUEinfo,data.file$CPUEinfo[1,])
data.file$len_info<-rbind(data.file$len_info,data.file$len_info[1,])
data.file$age_info<-rbind(data.file$age_info,data.file$age_info[1,])
}
#Set Dirichlet on
# data.file$age_info[,5]<-data.file$len_info[,5]<-1
#Set up the correct fleet enumeration
# data.file$len_info[,6]<-1:data.file$Nfleets #Used for Dirichlet set-up
# data.file$age_info[,6]<-(data.file$Nfleets+1):(2*data.file$Nfleets) #Used for Dirichlet set-up
#Survey names
if(is.null(rv.Ct$data)){data.file$fleetinfo$fleetname<-paste0("Fishery",1:data.file$Nfleets)}
if(!is.null(rv.Ct$data))
{
#Fishery names come from the catch table's column names (spaces stripped)
fishery.names<-gsub(" ","",colnames(rv.Ct$data)[-1])
#Index table: column 3 holds the fleet number, column 6 the survey name
if(!is.null(rv.Index$data)&data.file$Nfleets>catch.fleets)
{
Surveyonly<-subset(rv.Index$data,Fleet>catch.fleets)
fleet.survey.names<-unique(c(fishery.names,unique(Surveyonly[,6])))
survey.fleets<-unique(Surveyonly[,3])
data.file$fleetinfo$fleetname<-fleet.survey.names
}
if(is.null(rv.Index$data)|all(!is.null(rv.Index$data)&data.file$Nfleets==catch.fleets)){data.file$fleetinfo$fleetname<-fishery.names}
#Mark survey fleets as fleet type 3 in fleetinfo
if(!is.null(rv.Index$data)& max(rv.Index$data[,3])>length(fishery.names)){data.file$fleetinfo[survey.fleets,1]<-3}
}
data.file$CPUEinfo[,1]<-1:data.file$Nfleets
}
#RSS fleets get CPUE units code 34
#NOTE(review): fleet.survey.names is only created inside the Nfleets>1 branch
#above — confirm this condition always holds when Nfleets>catch.fleets
if(!is.null(rv.Index$data)&data.file$Nfleets>catch.fleets)
{
if(any(fleet.survey.names=="RSS"))
{
data.file$CPUEinfo[grep("RSS",fleet.survey.names),2]<-34
}
}
#Change survey timing to 1
data.file$fleetinfo$surveytiming[data.file$fleetinfo$type%in%3]<-1
#Catch units
if(input$Ct_units_choice)
{
#Per-fleet units codes supplied as a comma-separated string
ct.units<-as.numeric(trimws(unlist(strsplit(input$fleet_ct_units,","))))
#data.file$fleetinfo[ct.units,4]<-2 #use this when just specifying which are fleets are numbers
data.file$fleetinfo[,4]<-ct.units
}
#Write the assembled SS data file for this scenario (r4ss::SS_writedat)
SS_writedat(data.file,paste0("Scenarios/",input$Scenario_name,"/datafile.dat"),overwrite=TRUE)
}
####################### END DATA FILE #####################################
##################################################################################
####################### START CTL FILE ####################################
#Read, edit then write new CONTROL file
#Only build the control file when neither a user-supplied control file nor a
#fully user-specified model is in use
if(all(!input$use_controlnew,!input$user_model))
{
#Change to 1 platoon
if(!is.null(input$GT1)){if(input$GT1){ctl.file$N_platoon<-1}}
#LENGTH or AGE-ONLY
#LENGTH/AGE-ONLY model (comps and/or index but no catch series): growth and
#maturity fixed at the user's inputs; MG_parms columns 3/4 are INIT/prior
if(all(!is.null(c(rv.Lt$data,rv.Age$data,rv.Index$data)),is.null(rv.Ct$data))==TRUE)
{
fem_vbgf<-VBGF(input$Linf_f,input$k_f,input$t0_f,c(0:Nages()))
#Females
ctl.file$MG_parms[1,3]<-input$M_f #M
#ctl.file$MG_parms[2,3:4]<-fem_vbgf[1] #L0
ctl.file$Growth_Age_for_L1<-input$t0_f
ctl.file$MG_parms[2,3:4]<-0 #L0
ctl.file$MG_parms[3,3:4]<-input$Linf_f #Linf
ctl.file$MG_parms[4,3:4]<-input$k_f #k
ctl.file$MG_parms[5,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f,","))))[1] #CV
ctl.file$MG_parms[6,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f,","))))[2] #CV
#Maturity
ctl.file$MG_parms[9,3:4]<-input$L50_f #Lmat50%
ctl.file$MG_parms[10,3:4]<- log(0.05/0.95)/(input$L95_f-input$L50_f) #Maturity slope
#ctl.file$MG_parms[11,3:4]<-input$Fec_a_f #coefficient
#ctl.file$MG_parms[12,3:4]<- input$Fec_b_f #exponent
#Males (default to the FEMALE values; overridden below if male inputs supplied)
ctl.file$MG_parms[13,3]<-input$M_f #M
#ctl.file$MG_parms[14,3:4]<-fem_vbgf[1] #L0
ctl.file$MG_parms[14,3:4]<-0 #L0
ctl.file$MG_parms[15,3:4]<-input$Linf_f #Linf
ctl.file$MG_parms[16,3:4]<-input$k_f #k
ctl.file$MG_parms[17,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f,","))))[1] #CV
ctl.file$MG_parms[18,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f,","))))[2] #CV
#ctl.file$MG_parms[19,3:4]<-input$WLa_f #coefficient
#ctl.file$MG_parms[20,3:4]<-input$WLb_f #exponent
if(input$male_offset)
{
#Offset parameterization: male parameters expressed as offsets from females (=0)
ctl.file$parameter_offset_approach<-2 #Change to offset approach
ctl.file$MG_parms[13,3:4]<-0 #M
ctl.file$MG_parms[14,3:4]<-0 #L0
ctl.file$MG_parms[15,3:4]<-0 #Linf
ctl.file$MG_parms[16,3:4]<-0 #k
ctl.file$MG_parms[17,3:4]<-0 #CV
ctl.file$MG_parms[18,3:4]<-0 #CV
#Weight-length
ctl.file$MG_parms[19,3:4]<-0 #coefficient
ctl.file$MG_parms[20,3:4]<-0 #exponent
}
if(input$male_parms)
{
#Male-specific fixed values supplied by the user
male_vbgf<-VBGF(input$Linf_m,input$k_m,input$t0_m,c(input$t0_f:Nages()))
ctl.file$MG_parms[13,3]<-input$M_m #M
ctl.file$MG_parms[14,3:4]<-male_vbgf[1] #L0
ctl.file$MG_parms[15,3:4]<-input$Linf_m #Linf
ctl.file$MG_parms[16,3:4]<-input$k_m #k
ctl.file$MG_parms[17,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_m,","))))[1] #CV
ctl.file$MG_parms[18,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_m,","))))[2] #CV
# ctl.file$MG_parms[19,3:4]<-input$WLa_m #coefficient
# ctl.file$MG_parms[20,3:4]<-input$WLb_m #exponent
}
#lnR0 phase: estimate F => lnR0 fixed negative phase is lifted (-1 keeps it fixed)
if(input$Ct_F_LO_select=="Estimate F"){ctl.file$SR_parms[1,7]=-1} #lnR0
if(input$Ct_F_LO_select=="Constant Catch"){ctl.file$SR_parms[1,7]=1} #lnR0
ctl.file$SR_parms[2,3:4]<-input$h_LO #steepnes
}
#LENGTH and CATCH with fixed parameters
#Catch plus at least one comp/index source, biology fixed (no estimation)
if(all(any(input$est_parms==FALSE,input$est_parms2==FALSE),any(all(!is.null(rv.Lt$data),!is.null(rv.Ct$data)),all(!is.null(rv.Age$data),!is.null(rv.Ct$data)),all(!is.null(rv.Index$data),!is.null(rv.Ct$data))))==TRUE)
{
fem_vbgf<-VBGF(input$Linf_f_fix,input$k_f_fix,input$t0_f_fix,c(0:Nages()))
#Females
ctl.file$MG_parms[1,3]<-input$M_f_fix #M
#ctl.file$MG_parms[2,3:4]<-fem_vbgf[1] #L0
ctl.file$Growth_Age_for_L1<-input$t0_f_fix
ctl.file$MG_parms[2,3:4]<-0 #L0
ctl.file$MG_parms[3,3:4]<-input$Linf_f_fix #Linf
ctl.file$MG_parms[4,3:4]<-input$k_f_fix #k
ctl.file$MG_parms[5,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f_fix,","))))[1] #CV
ctl.file$MG_parms[6,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f_fix,","))))[2] #CV
#Weight-length
ctl.file$MG_parms[7,3:4]<-input$WLa_f_fix #coefficient
ctl.file$MG_parms[8,3:4]<- input$WLb_f_fix #exponent
#Maturity
ctl.file$MG_parms[9,3:4]<-input$L50_f_fix #Lmat50%
ctl.file$MG_parms[10,3:4]<- log(0.05/0.95)/(input$L95_f_fix-input$L50_f_fix) #Maturity slope
ctl.file$MG_parms[11,3:4]<-input$Fec_a_f_fix #coefficient
ctl.file$MG_parms[12,3:4]<- input$Fec_b_f_fix #exponent
#Males (default to female values; overridden below if male inputs supplied)
ctl.file$MG_parms[13,3]<-input$M_f_fix #M
#ctl.file$MG_parms[14,3:4]<-fem_vbgf[1] #L0
ctl.file$MG_parms[14,3:4]<-0 #L0
ctl.file$MG_parms[15,3:4]<-input$Linf_f_fix #Linf
ctl.file$MG_parms[16,3:4]<-input$k_f_fix #k
ctl.file$MG_parms[17,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f_fix,","))))[1] #CV
ctl.file$MG_parms[18,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f_fix,","))))[2] #CV
ctl.file$MG_parms[19,3:4]<-input$WLa_f_fix #coefficient
ctl.file$MG_parms[20,3:4]<- input$WLb_f_fix #exponent
if(input$male_offset_fix)
{
#Offset parameterization: male parameters expressed as offsets from females (=0)
ctl.file$parameter_offset_approach<-2 #Change to offset approach
ctl.file$MG_parms[13,3:4]<-0 #M
ctl.file$MG_parms[14,3:4]<-0 #L0
ctl.file$MG_parms[15,3:4]<-0 #Linf
ctl.file$MG_parms[16,3:4]<-0 #k
ctl.file$MG_parms[17,3:4]<-0 #CV
ctl.file$MG_parms[18,3:4]<-0 #CV
#Weight-length
ctl.file$MG_parms[19,3:4]<-0 #coefficient
ctl.file$MG_parms[20,3:4]<-0 #exponent
}
if(input$male_parms_fix)
{
#Male-specific fixed values supplied by the user
male_vbgf<-VBGF(input$Linf_m_fix,input$k_m_fix,input$t0_m_fix,c(input$t0_f_fix:Nages()))
ctl.file$MG_parms[13,3]<-input$M_m_fix #M
ctl.file$MG_parms[14,3:4]<-male_vbgf[1] #L0
ctl.file$MG_parms[15,3:4]<-input$Linf_m_fix #Linf
ctl.file$MG_parms[16,3:4]<-input$k_m_fix #k
ctl.file$MG_parms[17,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_m_fix,","))))[1] #CV
ctl.file$MG_parms[18,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_m_fix,","))))[2] #CV
#Weight-length
ctl.file$MG_parms[19,3:4]<-input$WLa_m_fix #coefficient
ctl.file$MG_parms[20,3:4]<-input$WLb_m_fix #exponent
}
#S-R
ctl.file$SR_parms[1,3:4]<-input$lnR0 #lnR0
ctl.file$SR_parms[2,3:4]<-input$h #steepnes
}
#LENGTH and CATCH with estimated parameters
#Catch plus at least one comp/index source, biology estimated with priors
if(all(any(input$est_parms==TRUE,input$est_parms2==FALSE),any(all(!is.null(rv.Lt$data),!is.null(rv.Ct$data)),all(!is.null(rv.Age$data),!is.null(rv.Ct$data)),all(!is.null(rv.Index$data),!is.null(rv.Ct$data))))==TRUE)
{
fem_vbgf<-VBGF(input$Linf_f_mean,input$k_f_mean,input$t0_f_mean,c(0:Nages()))
#c("lognormal","truncated normal","uniform","beta")
#Map UI prior names onto SS prior-type codes (0,1,2,3,5,6)
prior.name<-c("no prior","symmetric beta", "beta","lognormal","gamma","normal")
prior.type<-c(0:3,5,6)
#Females
#Per parameter: MG_parms cols 3/4 = INIT/prior mean (log-mean for lognormal),
#5 = prior SD, 6 = prior type code, 7 = estimation phase
#M
if(input$M_f_prior=="lognormal"){ctl.file$MG_parms[1,3:4]<-c(input$M_f_mean,log(input$M_f_mean))}
else {ctl.file$MG_parms[1,3:4]<-c(input$M_f_mean,input$M_f_mean)}
ctl.file$MG_parms[1,5]<-input$M_f_SD
ctl.file$MG_parms[1,6]<-prior.type[prior.name==input$M_f_prior]
ctl.file$MG_parms[1,7]<-input$M_f_phase
#L0
ctl.file$Growth_Age_for_L1<-input$t0_f_mean
# if(input$t0_f_prior=="lognormal"){ctl.file$MG_parms[2,3:4]<-c(fem_vbgf[1],log(fem_vbgf[1]))}
# else {ctl.file$MG_parms[2,3:4]<-fem_vbgf[1]}
if(input$t0_f_prior=="lognormal"){ctl.file$MG_parms[2,3:4]<-c(0,log(0.0000001))}
else {ctl.file$MG_parms[2,3:4]<-0}
ctl.file$MG_parms[2,5]<-input$t0_f_SD
ctl.file$MG_parms[2,6]<-prior.type[prior.name==input$t0_f_prior]
ctl.file$MG_parms[2,7]<-input$t0_f_phase
#Linf
if(input$Linf_f_prior=="lognormal"){ctl.file$MG_parms[3,3:4]<-c(input$Linf_f_mean,log(input$Linf_f_mean))}
else{ctl.file$MG_parms[3,3:4]<-input$Linf_f_mean}
ctl.file$MG_parms[3,5]<-input$Linf_f_SD
ctl.file$MG_parms[3,6]<-prior.type[prior.name==input$Linf_f_prior]
ctl.file$MG_parms[3,7]<-input$Linf_f_phase
#k
if(input$k_f_prior=="lognormal"){ctl.file$MG_parms[4,3:4]<-c(input$k_f_mean,log(input$k_f_mean))}
else {ctl.file$MG_parms[4,3:4]<-input$k_f_mean}
ctl.file$MG_parms[4,5]<-input$k_f_SD
ctl.file$MG_parms[4,6]<-prior.type[prior.name==input$k_f_prior]
ctl.file$MG_parms[4,7]<-input$k_f_phase
#CV young
if(input$CV_lt_f_young_prior=="lognormal"){ctl.file$MG_parms[5,3:4]<-c(input$CV_lt_f_young_mean,log(input$CV_lt_f_young_mean))}
else{ctl.file$MG_parms[5,3:4]<-input$CV_lt_f_young_mean}
ctl.file$MG_parms[5,5]<-input$CV_lt_f_young_SD
ctl.file$MG_parms[5,6]<-prior.type[prior.name==input$CV_lt_f_young_prior]
ctl.file$MG_parms[5,7]<-input$CV_lt_f_young_phase
#CV old (female): INIT/prior mean (log-mean for a lognormal prior), SD, prior type, phase
#BUG FIX: the original unconditionally overwrote MG_parms[6,3:4] with the raw
#mean AFTER the if/else, defeating the lognormal branch; the overwrite is
#removed so this block matches the CV-young block above
if(input$CV_lt_f_old_prior=="lognormal"){ctl.file$MG_parms[6,3:4]<-c(input$CV_lt_f_old_mean,log(input$CV_lt_f_old_mean))}
else{ctl.file$MG_parms[6,3:4]<-input$CV_lt_f_old_mean}
ctl.file$MG_parms[6,5]<-input$CV_lt_f_old_SD
ctl.file$MG_parms[6,6]<-prior.type[prior.name==input$CV_lt_f_old_prior]
ctl.file$MG_parms[6,7]<-input$CV_lt_f_old_phase
#Weight-length
ctl.file$MG_parms[7,3:4]<-input$WLa_f_est #coefficient
ctl.file$MG_parms[8,3:4]<- input$WLb_f_est #exponent
#Maturity
ctl.file$MG_parms[9,3:4]<-input$L50_f_est #Lmat50%
ctl.file$MG_parms[10,3:4]<- log(0.05/0.95)/(input$L95_f_est-input$L50_f_est) #Maturity slope
ctl.file$MG_parms[11,3:4]<-input$Fec_a_f_est #coefficient
ctl.file$MG_parms[12,3:4]<- input$Fec_b_f_est #exponent
#Males default to the female means; overridden below when male offsets/parameters are supplied
ctl.file$MG_parms[13,3:4]<-c(input$M_f_mean,log(input$M_f_mean)) #M
#ctl.file$MG_parms[14,3:4]<-fem_vbgf[1] #L0
ctl.file$MG_parms[14,3:4]<-0 #L0
ctl.file$MG_parms[15,3:4]<-input$Linf_f_mean #Linf
ctl.file$MG_parms[16,3:4]<-input$k_f_mean #k
ctl.file$MG_parms[17,3:4]<-input$CV_lt_f_old_mean #CV
ctl.file$MG_parms[18,3:4]<-input$CV_lt_f_old_mean #CV
#Weight-length
ctl.file$MG_parms[19,3:4]<-input$WLa_f_est #coefficient
ctl.file$MG_parms[20,3:4]<- input$WLb_f_est #exponent
if(input$male_offset_est)
{
#Offset parameterization: male M, L0, Linf, k, the two CVs, and the
#weight-length coefficient/exponent (MG_parms rows 13-20) are expressed as
#offsets from the female values, so their INIT/prior columns are zeroed
ctl.file$parameter_offset_approach<-2 #Change to offset approach
ctl.file$MG_parms[13:20,3:4]<-0 #M, L0, Linf, k, CV young/old, W-L a & b
}
if(input$male_parms_est)
{
#Male-specific parameters, each with its own prior and estimation phase
male_vbgf_est<-VBGF(input$Linf_m_mean,input$k_m_mean,input$t0_m_mean,c(input$t0_f_mean:Nages()))
# ctl.file$MG_parms[13,3]<-input$M_m_mean #M
# ctl.file$MG_parms[14,3:4]<-male_vbgf_est[1] #L0
# ctl.file$MG_parms[15,3:4]<-input$Linf_m_mean #Linf
# ctl.file$MG_parms[16,3:4]<-input$k_m_mean #k
# ctl.file$MG_parms[17,3:4]<-input$CV_lt_m_mean #CV
# ctl.file$MG_parms[18,3:4]<-input$CV_lt_m_mean #CV
#M
if(input$M_m_prior=="lognormal"){ctl.file$MG_parms[13,3:4]<-c(input$M_m_mean,log(input$M_m_mean))}
else {ctl.file$MG_parms[13,3:4]<-c(input$M_m_mean,input$M_m_mean)}
ctl.file$MG_parms[13,5]<-input$M_m_SD
ctl.file$MG_parms[13,6]<-prior.type[prior.name==input$M_m_prior]
ctl.file$MG_parms[13,7]<-input$M_m_phase
#L0 (initial value from the male VBGF evaluated at the female reference age)
#if(input$t0_f_prior=="lognormal"){ctl.file$MG_parms[14,3:4]<-c(male_vbgf_est[1],log(male_vbgf_est[1]+0.000000001))}
#else {ctl.file$MG_parms[14,3:4]<-male_vbgf_est[1]}
if(input$t0_m_prior=="lognormal"){ctl.file$MG_parms[14,3:4]<-c(male_vbgf_est[1],log(male_vbgf_est[1]+0.000000001))}
else {ctl.file$MG_parms[14,3:4]<-male_vbgf_est[1]}
ctl.file$MG_parms[14,5]<-input$t0_m_SD
ctl.file$MG_parms[14,6]<-prior.type[prior.name==input$t0_m_prior]
ctl.file$MG_parms[14,7]<-input$t0_m_phase
#Linf
if(input$Linf_m_prior=="lognormal"){ctl.file$MG_parms[15,3:4]<-c(input$Linf_m_mean,log(input$Linf_m_mean))}
else{ctl.file$MG_parms[15,3:4]<-input$Linf_m_mean}
ctl.file$MG_parms[15,5]<-input$Linf_m_SD
ctl.file$MG_parms[15,6]<-prior.type[prior.name==input$Linf_m_prior]
ctl.file$MG_parms[15,7]<-input$Linf_m_phase
#k
if(input$k_m_prior=="lognormal"){ctl.file$MG_parms[16,3:4]<-c(input$k_m_mean,log(input$k_m_mean))}
else {ctl.file$MG_parms[16,3:4]<-input$k_m_mean}
ctl.file$MG_parms[16,5]<-input$k_m_SD
ctl.file$MG_parms[16,6]<-prior.type[prior.name==input$k_m_prior]
ctl.file$MG_parms[16,7]<-input$k_m_phase
#CV young
if(input$CV_lt_m_young_prior=="lognormal"){ctl.file$MG_parms[17,3:4]<-c(input$CV_lt_m_young_mean,log(input$CV_lt_m_young_mean))}
else{ctl.file$MG_parms[17,3:4]<-input$CV_lt_m_young_mean}
ctl.file$MG_parms[17,5]<-input$CV_lt_m_young_SD
ctl.file$MG_parms[17,6]<-prior.type[prior.name==input$CV_lt_m_young_prior]
ctl.file$MG_parms[17,7]<-input$CV_lt_m_young_phase
#CV old
if(input$CV_lt_m_old_prior=="lognormal"){ctl.file$MG_parms[18,3:4]<-c(input$CV_lt_m_old_mean,log(input$CV_lt_m_old_mean))}
else{ctl.file$MG_parms[18,3:4]<-input$CV_lt_m_old_mean}
ctl.file$MG_parms[18,5]<-input$CV_lt_m_old_SD
ctl.file$MG_parms[18,6]<-prior.type[prior.name==input$CV_lt_m_old_prior]
ctl.file$MG_parms[18,7]<-input$CV_lt_m_old_phase
#Weight-length
ctl.file$MG_parms[19,3:4]<-input$WLa_m_est #coefficient
ctl.file$MG_parms[20,3:4]<- input$WLb_m_est #exponent
}
#S-R (stock-recruit): lnR0 initial value from the user; steepness h with the
#chosen prior (cols 3/4 = INIT/prior mean, 5 = SD, 6 = prior type, 7 = phase)
ctl.file$SR_parms[1,3:4]<-input$lnR0_est #lnR0
#BUG FIX: the original called log(h_mean_ss) on a non-existent object
#(missing input$), which errored whenever the lognormal prior was selected
if(input$h_ss_prior=="lognormal"){ctl.file$SR_parms[2,3:4]<-c(input$h_mean_ss,log(input$h_mean_ss))}
else{ctl.file$SR_parms[2,3:4]<-input$h_mean_ss}
ctl.file$SR_parms[2,5]<-input$h_SD_ss
ctl.file$SR_parms[2,6]<-prior.type[prior.name==input$h_ss_prior]
ctl.file$SR_parms[2,7]<-input$h_phase
}
#Recruitment estimation
#Defaults: recruitment deviations off (do_recdev=0, negative phase); bias
#adjustment ramp spans the whole model period
ctl.file$do_recdev<-0
ctl.file$recdev_phase<- -1
ctl.file$MainRdevYrFirst<-input$styr #Start year of recruitment estimation
ctl.file$MainRdevYrLast<-input$endyr #Last year of recruitment estimation
ctl.file$last_early_yr_nobias_adj<-input$styr #End year of early rev devs (no bias)
ctl.file$first_yr_fullbias_adj<-input$styr #First year full bias
ctl.file$last_yr_fullbias_adj<-input$endyr #Last year full bias
ctl.file$first_recent_yr_nobias_adj<-input$endyr #First year recent no bias
if(input$rec_choice)
{
#User enabled recruitment-deviation estimation
ctl.file$SR_parms[3,3:4]<-input$sigmaR #sigma R
if(input$RecDevChoice=="1: Devs sum to zero"){ctl.file$do_recdev<-1}
if(input$RecDevChoice=="2: Simple deviations"){ctl.file$do_recdev<-2}
if(input$RecDevChoice=="3: deviation vector"){ctl.file$do_recdev<-3}
if(input$RecDevChoice=="4: option 3 plus penalties"){ctl.file$do_recdev<-4}
ctl.file$MainRdevYrFirst<-input$Rdev_startyr #Start year of recruitment estimation
ctl.file$MainRdevYrLast<-input$Rdev_endyr #Last year of recruitment estimation
ctl.file$recdev_phase<- 1
if(input$biasC_choice)
{
#With bias correction
ctl.file$recdev_early_start<--1 #Year early rec dev phase starts
ctl.file$recdev_early_phase<-3 #Early rec dev phase
ctl.file$Fcast_recr_phase<-0 #Forecast rec dev phase
ctl.file$last_early_yr_nobias_adj<-input$NobiasC_early #End year of early rev devs (no bias)
ctl.file$first_yr_fullbias_adj<-input$BiasC_startyr #First year full bias
ctl.file$last_yr_fullbias_adj<-input$BiasC_endyr #Last year full bias
ctl.file$first_recent_yr_nobias_adj<-input$NobiasC_recent #First year recent no bias
ctl.file$max_bias_adj<-input$BiasC #Max bias adjustment
}
}
#SELECTIVITY
#Length Selectivity
if(input$Ct_F_LO_select=="Estimate F" & is.null(rv.Ct$data)){ctl.file$size_selex_types[2]<-3} #Change to recognize discard fishery
#Comma-separated per-fleet selectivity inputs parsed into numeric vectors
Sel50<-as.numeric(trimws(unlist(strsplit(input$Sel50,","))))
Sel50_phase<-as.numeric(trimws(unlist(strsplit(input$Sel50_phase,","))))
Selpeak<-as.numeric(trimws(unlist(strsplit(input$Selpeak,","))))
Selpeak_phase<-as.numeric(trimws(unlist(strsplit(input$Selpeak_phase,","))))
bin.width<-data.file$lbin_vector[2]-data.file$lbin_vector[1]
#Symmetric bound half-width around the fleet-1 peak, limited by the bin range
minmaxbin<-min(Selpeak[1]-min(data.file$lbin_vector),max(data.file$lbin_vector)-Selpeak[1])
#NOTE(review): sel.inputs.comps is computed but never used
sel.inputs.comps<-length(Sel50)-length(Sel50_phase)-length(Selpeak)-length(Selpeak_phase)
sel.inputs.lts<-c(length(Sel50),length(Sel50_phase),length(Selpeak),length(Selpeak_phase))
if(input$Sel_choice=="Logistic")
{
#Throw warning if not enough selectivity inputs (one value per fleet required)
if(!all(data.file$Nfleets==sel.inputs.lts))
{
sendSweetAlert(
session = session,
title = "Selectivity input warning",
text = "Please check to see if you have provided selectivity inputs (both parameter and phases) for all fleets in the model. This includes fishing fleets and surverys.",
type = "error")
remove_modal_spinner()
stopApp()
}
#ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width)
#ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
#Logistic shape via the SS double-normal: ascending width derived from Sel50,
#plateau/descending parameters fixed at +/-15 (phases -1)
ctl.file$size_selex_parms[1,1:2]<-c(Selpeak[1]-minmaxbin,Selpeak[1]+minmaxbin)
ctl.file$size_selex_parms[1,3:4]<- Selpeak[1]
ctl.file$size_selex_parms[2,3:4]<- 15
ctl.file$size_selex_parms[3,3:4]<- log(-((Sel50[1]-Selpeak[1])^2/log(0.5)))
ctl.file$size_selex_parms[4,3:4]<- -15
ctl.file$size_selex_parms[6,3:4]<- 15
#phases
ctl.file$size_selex_parms[1,7]<- Selpeak_phase[1]
ctl.file$size_selex_parms[2,7]<- -1
ctl.file$size_selex_parms[3,7]<- Sel50_phase[1]
ctl.file$size_selex_parms[4,7]<- -1
ctl.file$size_selex_parms[6,7]<- -1
}
if(input$Sel_choice=="Dome-shaped")
{
#Parse the comma-separated per-fleet dome-shaped inputs FIRST
#(BUG FIX: the original measured length(PeakDesc) etc. BEFORE these objects
#were created, and used `break` — an error outside a loop — to abort; this
#now mirrors the Logistic branch and calls stopApp())
PeakDesc<-as.numeric(trimws(unlist(strsplit(input$PeakDesc,","))))
PeakDesc_phase<-as.numeric(trimws(unlist(strsplit(input$PeakDesc_phase,","))))
LtPeakFinal<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal,","))))
LtPeakFinal_phase<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal_phase,","))))
FinalSel<-as.numeric(trimws(unlist(strsplit(input$FinalSel,","))))
FinalSel_phase<-as.numeric(trimws(unlist(strsplit(input$FinalSel_phase,","))))
#Throw warning if not enough selectivity inputs (one value per fleet required)
sel.inputs.lts<-c(length(Sel50),length(Sel50_phase),length(Selpeak),length(Selpeak_phase),length(PeakDesc),length(PeakDesc_phase),length(LtPeakFinal),length(LtPeakFinal_phase),length(FinalSel),length(FinalSel_phase))
if(!all(data.file$Nfleets==sel.inputs.lts))
{
sendSweetAlert(
session = session,
title = "Selectivity input warning",
text = "Please check to see if you have provided selectivity inputs (both parameter and phases) for all fleets in the model. This includes fishing fleets and surverys.",
type = "error")
remove_modal_spinner()
stopApp()
}
#Symmetric bound half-width around the fleet-1 peak, limited by the bin range
minmaxbin<-min(Selpeak[1]-min(data.file$lbin_vector),max(data.file$lbin_vector)-Selpeak[1])
#ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width)
#ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
#SS double-normal (dome) parameters for fleet 1
ctl.file$size_selex_parms[1,1:2]<-c(Selpeak[1]-minmaxbin,Selpeak[1]+minmaxbin)
ctl.file$size_selex_parms[1,3:4]<- Selpeak[1]
ctl.file$size_selex_parms[2,3:4]<- -log((max(data.file$lbin_vector)-Selpeak[1]-bin.width)/(PeakDesc[1]-Selpeak[1]-bin.width))
ctl.file$size_selex_parms[3,3:4]<- log(-((Sel50[1]-Selpeak[1])^2/log(0.5)))
ctl.file$size_selex_parms[4,3:4]<- log(LtPeakFinal[1])
ctl.file$size_selex_parms[6,3:4]<- -log((1/(FinalSel[1]+0.000000001)-1))
#phases
ctl.file$size_selex_parms[1,7]<- Selpeak_phase[1]
ctl.file$size_selex_parms[2,7]<- PeakDesc_phase[1]
ctl.file$size_selex_parms[3,7]<- Sel50_phase[1]
ctl.file$size_selex_parms[4,7]<- LtPeakFinal_phase[1]
ctl.file$size_selex_parms[6,7]<- FinalSel_phase[1]
}
# if(input$dirichlet)
# {
# dirichlet.index<-c(unique(data.file$lencomp[,3]),(unique(data.file$agecomp[,3])+3))
# ctl.file$dirichlet_parms[dirichlet.index,3:4]<-0
# ctl.file$dirichlet_parms[dirichlet.index,7]<-2
# }
#Add other fleets
#Replicate the fleet-1 template rows of the control-file tables, once per extra fleet
if(data.file$Nfleets>1){
for(i in 1:(data.file$Nfleets-1))
{
ctl.file$init_F<-rbind(ctl.file$init_F,ctl.file$init_F[1,])
ctl.file$size_selex_types<-rbind(ctl.file$size_selex_types,ctl.file$size_selex_types[1,])
if(input$Ct_F_LO_select=="Estimate F" & is.null(rv.Ct$data)){ctl.file$size_selex_types[,2]<-3}
ctl.file$age_selex_types<-rbind(ctl.file$age_selex_types,ctl.file$age_selex_types[1,])
#Each fleet has 6 size-selectivity parameter rows
ctl.file$size_selex_parms<-rbind(ctl.file$size_selex_parms,ctl.file$size_selex_parms[1:6,])
#Symmetric bound half-width around THIS fleet's selectivity peak
minmaxbin<-min(Selpeak[i+1]-min(data.file$lbin_vector),max(data.file$lbin_vector)-Selpeak[i+1])
if(input$Sel_choice=="Logistic")
{
#Logistic selectivity for fleet i+1 (rows 6*i+1 .. 6*i+6 of size_selex_parms)
ctl.file$size_selex_parms[6*i+3,3:4]<- log(-((Sel50[i+1]-Selpeak[i+1])^2/log(0.5)))
ctl.file$size_selex_parms[6*i+3,7]<- Sel50_phase[i+1]
#ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width)
# ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
#BUG FIX: the upper bound previously used Selpeak[1] (fleet 1's peak);
#it must use this fleet's peak so the bounds are symmetric around it
ctl.file$size_selex_parms[6*i+1,1:2]<-c(Selpeak[i+1]-minmaxbin,Selpeak[i+1]+minmaxbin)
ctl.file$size_selex_parms[6*i+1,3:4]<- Selpeak[i+1]
ctl.file$size_selex_parms[6*i+1,7]<- Selpeak_phase[i+1]
ctl.file$size_selex_parms[6*i+2,3:4]<- 15
ctl.file$size_selex_parms[6*i+2,7]<- -1
ctl.file$size_selex_parms[6*i+4,3:4]<- -15
ctl.file$size_selex_parms[6*i+4,7]<- -1
ctl.file$size_selex_parms[6*i+6,3:4]<- 15
ctl.file$size_selex_parms[6*i+6,7]<- -1
}
if(input$Sel_choice=="Dome-shaped")
{
#Dome-shaped (double-normal) selectivity for fleet i+1
# ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
#BUG FIX: the upper bound previously used Selpeak[1] (fleet 1's peak);
#it must use this fleet's peak so the bounds are symmetric around it
ctl.file$size_selex_parms[6*i+1,1:2]<-c(Selpeak[i+1]-minmaxbin,Selpeak[i+1]+minmaxbin)
ctl.file$size_selex_parms[6*i+1,3:4]<- Selpeak[i+1]
ctl.file$size_selex_parms[6*i+1,7]<- Selpeak_phase[i+1]
ctl.file$size_selex_parms[6*i+2,3:4]<- -log((max(data.file$lbin_vector)-Selpeak[i+1]-bin.width)/(PeakDesc[i+1]-Selpeak[i+1]-bin.width))
ctl.file$size_selex_parms[6*i+2,7]<- PeakDesc_phase[i+1]
ctl.file$size_selex_parms[6*i+3,3:4]<- log(-((Sel50[i+1]-Selpeak[i+1])^2/log(0.5)))
ctl.file$size_selex_parms[6*i+3,7]<- Sel50_phase[i+1]
ctl.file$size_selex_parms[6*i+4,3:4]<- log(LtPeakFinal[i+1])
ctl.file$size_selex_parms[6*i+4,7]<- LtPeakFinal_phase[i+1]
ctl.file$size_selex_parms[6*i+6,3:4]<- -log((1/(FinalSel[i+1]+0.000000001)-1))
ctl.file$size_selex_parms[6*i+6,7]<- FinalSel_phase[i+1]
}
#Dirichlet data-weighting
# ctl.file$dirichlet_parms<-rbind(ctl.file$dirichlet_parms,ctl.file$dirichlet_parms[1:2,])
}
#Re-label so r4ss can interpret these new entries
rownames(ctl.file$init_F)<-paste0("InitF_seas_1_flt_",1:data.file$Nfleets,"Fishery",1:data.file$Nfleets)
rownames(ctl.file$age_selex_types)<-rownames(ctl.file$size_selex_types)<-paste0("Fishery",1:data.file$Nfleets)
size_selex_parms_rownames<-list()
for(f_i in 1:data.file$Nfleets)
{
#Six selectivity parameter rows per fleet, named SizeSel_P_1..6
size_selex_parms_rownames[[f_i]]<-c(paste0("SizeSel_P_1_Fishery",f_i,"(",f_i,")"),
paste0("SizeSel_P_2_Fishery",f_i,"(",f_i,")"),
paste0("SizeSel_P_3_Fishery",f_i,"(",f_i,")"),
paste0("SizeSel_P_4_Fishery",f_i,"(",f_i,")"),
paste0("SizeSel_P_5_Fishery",f_i,"(",f_i,")"),
paste0("SizeSel_P_6_Fishery",f_i,"(",f_i,")"))
}
size_selex_parms_rownames<-unlist(size_selex_parms_rownames)
rownames(ctl.file$size_selex_parms)<-size_selex_parms_rownames
}
#Remove surveys from initial F lines and add q and xtra variance lines
if(!is.null(rv.Index$data))
{
if(data.file$Nfleets>catch.fleets){ctl.file$init_F<-ctl.file$init_F[-survey.fleets,]}
#Q_options: one row per survey; extra_se flag switched on when Indexvar is chosen
q.setup.names<-c("fleet","link","link_info","extra_se","biasadj", "float")
q.setup.lines<-data.frame(t(c(unique(rv.Index$data[,3])[1],1,0,0,0,1)))
if(input$Indexvar){q.setup.lines<-data.frame(t(c(unique(rv.Index$data[,3])[1],1,0,1,0,1)))}
#Q_parms: LnQ_base line per survey; plus a Q_extraSD line when Indexvar is chosen
qnames<-c("LO","HI","INIT","PRIOR","PR_SD","PR_type","PHASE","env_var&link","dev_link","dev_minyr","dev_maxyr","dev_PH","Block","Block_Fxn")
q.lines<-data.frame(t(c(-15,15,1,0,1,0,-1,rep(0,7))))
if(input$Indexvar){q.lines<-data.frame(rbind(c(-15,15,1,0,1,0,-1,rep(0,7)),c(0,5,0,0,99,0,3,0,0,0,0,0,0,0)))}
if(length(unique(rv.Index$data[,3]))>1)
{
#Append a block for each additional survey fleet
for(q in 2:length(unique(rv.Index$data[,3])))
{
if(!input$Indexvar)
{
q.setup.lines<-rbind(q.setup.lines,c(unique(rv.Index$data[,3])[q],1,0,0,0,1))
q.lines<-rbind(q.lines,c(-15,15,1,0,1,0,-1,rep(0,7)))
}
if(input$Indexvar)
{
q.setup.lines<-rbind(q.setup.lines,c(unique(rv.Index$data[,3])[q],1,0,1,0,1))
#if(unique(rv.Index$data[,6])[q]!="RSS"){q.setup.lines<-rbind(q.setup.lines,c(unique(rv.Index$data[,3])[q],1,0,1,0,1))}
#if(unique(rv.Index$data[,6])[q]=="RSS"){q.setup.lines<-rbind(q.setup.lines,c(unique(rv.Index$data[,3])[q],1,0,0,0,1))}
#RSS surveys: extra-SD parameter fixed (negative phase) rather than estimated
if(unique(rv.Index$data[,6])[q]!="RSS"){q.lines<-rbind(q.lines,data.frame(rbind(c(-15,15,1,0,1,0,-1,rep(0,7)),c(0,5,0,0,99,0,3,0,0,0,0,0,0,0))))}
if(unique(rv.Index$data[,6])[q]=="RSS"){q.lines<-rbind(q.lines,data.frame(rbind(c(-15,15,1,0,1,0,-1,rep(0,7)),c(0,5,0,0,99,0,-3,0,0,0,0,0,0,0))))}
}
}
}
names(q.setup.lines)<-q.setup.names
rownames(q.setup.lines)<-unique(rv.Index$data[,6])
ctl.file$Q_options<-q.setup.lines
names(q.lines)<-qnames
if(!input$Indexvar){rownames(q.lines)<-paste0("LnQ_base_",unique(rv.Index$data[,6]),"(",unique(rv.Index$data[,3]),")")}
#rnames.temp<-c(paste0("LnQ_base_",unique(rv.Index$data[,5]),"(",unique(rv.Index$data[,2]),")"),paste0("Q_extraSD_",unique(rv.Index$data[,5]),"(",unique(rv.Index$data[,2]),")"))
#rnames.temp[1:length(rnames.temp)%%2 != 0]
if(input$Indexvar)
{
#Interleave LnQ_base and Q_extraSD row names (one pair per survey)
qnames.temp1<-paste0("LnQ_base_",unique(rv.Index$data[,6]),"(",unique(rv.Index$data[,3]),")")
qnames.temp2<-paste0("Q_extraSD_",unique(rv.Index$data[,6]),"(",unique(rv.Index$data[,3]),")")
qnames.temp<-as.vector(rbind(qnames.temp1,qnames.temp2))
# if(length(rnames.temp1)>1)
# {
# for(xx in 2:length(rnames.temp1))
# {
# rnames.temp<-c(rnames.temp1[x],rnames.temp2[x])
# }
# }
rownames(q.lines)<-qnames.temp
}
ctl.file$Q_parms<-q.lines
if(data.file$Nfleets>catch.fleets)
{
if(any(fleet.survey.names=="RSS"))
{
#RSS surveys carry no size selectivity: type 0 and parameter rows removed
RSS.index<-grep("RSS",fleet.survey.names)
#ctl.file$Q_parms<-ctl.file$Q_parms
ctl.file$size_selex_types[RSS.index,1]<-0 #Rename RSS selectivity types
ctl.file$size_selex_parms<-ctl.file$size_selex_parms[-c((RSS.index*6-5):(RSS.index*6)),] #Remove selectivity related to RSS
}
}
}
# if(input$Data_wt=="Dirichlet")
# {
# Dirichlet.fleets<-c(unique(data.file$lencomp[,3]),(unique(data.file$agecomp[,3])+data.file$Nfleets))
# # if(Dirichlet.fleets>1)
# # {
# # for(i in 1:length(Dirichlet.fleets)){ctl.file$dirichlet_parms<-rbind(ctl.file$dirichlet_parms,ctl.file$dirichlet_parms[1,])}
# # }
# ctl.file$dirichlet_parms[Dirichlet.fleets,3:4]<-0.5
# ctl.file$dirichlet_parms[Dirichlet.fleets,7]<-2
# }
#Change data weights
# Lt_dat_wts<-as.numeric(trimws(unlist(strsplit(input$Lt_datawts,","))))
# ctl.file$Variance_adjustments[1,]<-Lt_dat_wts
#Change likelihood component weight of catch
#No catch data supplied: weight the length-composition likelihoods per fleet
#and zero-out the catch and init-equilibrium-catch lambdas.
if (is.null(rv.Ct$data))
{
#Template lambda rows: 1 = length comps, 2 = catch, 3 = init equ catch
lts.lambdas<-ctl.file$lambdas[1,]
ct.lambdas<-ctl.file$lambdas[2,]
init.ct.lambdas<-ctl.file$lambdas[3,]
if(data.file$Nfleets>1)
{
#Replicate each template row per additional fleet; column 2 is the fleet index
for(i_lam in 2:data.file$Nfleets)
{
lts.lambdas_temp<-ctl.file$lambdas[1,]
ct.lambdas_temp<-ct.lambdas[1,]
init.ct.lambdas_temp<-init.ct.lambdas[1,]
lts.lambdas_temp[1,2]<-ct.lambdas_temp[1,2]<-init.ct.lambdas_temp[1,2]<-i_lam
lts.lambdas<-rbind(lts.lambdas,lts.lambdas_temp)
ct.lambdas<-rbind(ct.lambdas,ct.lambdas_temp)
init.ct.lambdas<-rbind(init.ct.lambdas,init.ct.lambdas_temp)
}
}
if(input$Ct_F_LO_select=="Estimate F")
{
if(data.file$Nfleets>1)
{
#User-supplied per-fleet weights (comma-separated): normalize to sum 1,
#then rescale so the largest weight equals 1 (column 4 = lambda value)
lt.lam.in<-as.numeric(trimws(unlist(strsplit(input$Wt_fleet_Ct,","))))/sum(as.numeric(trimws(unlist(strsplit(input$Wt_fleet_Ct,",")))))
lt.lam<-lt.lam.in/max(lt.lam.in)
lts.lambdas[,4]<-lt.lam
}
if(data.file$Nfleets==1)
{
lts.lambdas[,4]<-1
}
}
#Label rows (r4ss format), disable catch likelihoods, and rebuild the table
rownames(lts.lambdas)<-paste0("length_Fishery",c(1:data.file$Nfleets),"_sizefreq_method_1_Phz1")
ct.lambdas[,4]<-0
rownames(ct.lambdas)<-paste0("catch_Fishery",c(1:data.file$Nfleets),"_Phz1")
init.ct.lambdas[,4]<-0
rownames(init.ct.lambdas)<-paste0("init_equ_catch_Fishery",c(1:data.file$Nfleets),"_lambda_for_init_equ_catch_can_only_enable/disable for_all_fleets_Phz1")
ctl.file$lambdas<-rbind(lts.lambdas,ct.lambdas,init.ct.lambdas)
ctl.file$N_lambdas<-nrow(ctl.file$lambdas)
# ctl.file$lambdas[1,4]<-0
}
#Catch data supplied: enable the catch lambdas, zero-out the
#init-equilibrium-catch lambdas, and fix initial F at (near) zero with a
#negative phase so it is not estimated.
if(!is.null(rv.Ct$data))
{
#Template lambda rows: 2 = catch, 3 = init equ catch
ct.lambdas<-ctl.file$lambdas[2,]
init.ct.lambdas<-ctl.file$lambdas[3,]
if(data.file$Nfleets>1)
{
#Replicate each template row per additional fleet; column 2 is the fleet index
for(i_lam in 2:data.file$Nfleets)
{
ct.lambdas_temp<-ct.lambdas[1,]
init.ct.lambdas_temp<-init.ct.lambdas[1,]
ct.lambdas_temp[1,2]<-init.ct.lambdas_temp[1,2]<-i_lam
ct.lambdas<-rbind(ct.lambdas,ct.lambdas_temp)
init.ct.lambdas<-rbind(init.ct.lambdas,init.ct.lambdas_temp)
}
}
#Column 4 = lambda value: catch on, init-equ catch off
ct.lambdas[,4]<-1
rownames(ct.lambdas)<-paste0("catch_Fishery",c(1:data.file$Nfleets),"_Phz1")
init.ct.lambdas[,4]<-0
#BUG FIX: label the init-equ rows BEFORE binding into ctl.file$lambdas.
#Previously rownames() was applied after the rbind, so the combined table
#never received the r4ss row labels (compare the no-catch branch above).
rownames(init.ct.lambdas)<-paste0("init_equ_catch_Fishery",c(1:data.file$Nfleets),"_lambda_for_init_equ_catch_can_only_enable/disable for_all_fleets_Phz1")
ctl.file$lambdas<-rbind(ct.lambdas,init.ct.lambdas)
ctl.file$N_lambdas<-data.file$Nfleets*2
#ctl.file$lambdas[1,4]<-1
# ctl.file$lambdas[2,4]<-0
#INIT (col 3) ~0 and negative PHASE (col 7) => initial F fixed, not estimated
ctl.file$init_F[,3]<-1e-20
ctl.file$init_F[,7]<--1
}
#Write the modified control file into the scenario folder
SS_writectl(ctl.file,paste0("Scenarios/",input$Scenario_name,"/controlfile.ctl"),overwrite=TRUE)
}
}
}
}
####################### END CTL FILE ####################################
if(exists("checkmod")|input$user_model)
{
#Read the scenario's starter file and apply the user's run options, then write
#it back. Each option pair below is "if selected" / "if not selected or absent".
starter.file<-SS_readstarter(paste0("Scenarios/",input$Scenario_name,"/starter.ss"))
#Use par file for initial values (init_values_src: 1 = ss.par, 0 = ctl file)
if(input$use_par)
{
starter.file$init_values_src<-1
}
if(!input$use_par|is.null(input$use_par))
{
starter.file$init_values_src<-0
}
#Use data_echo.ss_new as the data file
if(input$use_datanew)
{
starter.file$datfile<-"data_echo.ss_new"
}
if(!input$use_datanew|is.null(input$use_datanew))
{
if(!input$user_model|is.null(input$use_datanew)){starter.file$datfile<-"datafile.dat"}
}
#Use control.ss_new as the control file
if(input$use_controlnew)
{
starter.file$ctlfile<-"control.ss_new"
}
if(!input$use_controlnew|is.null(input$use_controlnew))
{
if(!input$user_model|is.null(input$use_controlnew)){starter.file$ctlfile<-"controlfile.ctl"}
}
#Phase 0 (no-estimation) run option
if(input$use_phase0)
{
starter.file$last_estimation_phase<-0
}
#BUG FIX: this branch previously tested input$use_par (copy-paste from the
#par-file option above), which reset last_estimation_phase back to 6 even when
#the user had requested a phase-0 run. It must mirror input$use_phase0.
if(!input$use_phase0|is.null(input$use_phase0))
{
starter.file$last_estimation_phase<-6
}
#Jitter is applied in a separate post-run step; always start with no jitter
starter.file$jitter_fraction<-0
# if(input$jitter_choice)
# {
# starter.file$jitter_fraction<-input$jitter_fraction
# starter.file$init_values_src<-0
# }
SS_writestarter(starter.file,paste0("Scenarios/",input$Scenario_name),overwrite=TRUE)
#Forecast file modifications
#Reference points
#Either modify the existing forecast.ss in place, or adopt forecast.ss_new
if(!input$use_forecastnew)
{
forecast.file<-SS_readforecast(paste0("Scenarios/",input$Scenario_name,"/forecast.ss"))
#Reference-point and control-rule settings
if(input$RP_choices){
forecast.file$SPRtarget<-input$SPR_target
forecast.file$Btarget<-input$B_target
#Map the UI's control-rule label back to its SS numeric code (1-4)
CR_choices<-c("1: Catch fxn of SSB, buffer on F",
"2: F fxn of SSB, buffer on F",
"3: Catch fxn of SSB, buffer on catch",
"4: F fxn of SSB, buffer on catch")
CR_choices_num.vec<-c(1:4)
forecast.file$ControlRuleMethod<-CR_choices_num.vec[CR_choices==input$CR_Ct_F]
forecast.file$SBforconstantF<-input$slope_hi
forecast.file$BfornoF<-input$slope_low
}
#Forecast years and catch buffer (single value, or one value per year)
if(input$Forecast_choice)
{
forecast.file$Nforecastyrs<-input$forecast_num
buffer.in<-as.numeric(trimws(unlist(strsplit(input$forecast_buffer,","))))
if(length(buffer.in)==1){forecast.file$Flimitfraction<-buffer.in}
if(length(buffer.in)>1)
{
#Flimitfraction = -1 signals SS to read year-specific fractions instead
forecast.file$Flimitfraction<--1
buffer.datafr<-data.frame(Year=c((data.file$endyr+1):(data.file$endyr+input$forecast_num)),Fraction=buffer.in)
#rownames(buffer.datafr)<-paste0("#_Flimitfraction_m",1:input$forecast_num)
forecast.file$Flimitfraction_m<-buffer.datafr
}
}
SS_writeforecast(forecast.file,paste0("Scenarios/",input$Scenario_name),overwrite=TRUE)
}
if(input$use_forecastnew)
{
#Round-trip forecast.ss_new through r4ss so it becomes the active forecast file
forecast.file<-SS_readforecast(paste0("Scenarios/",input$Scenario_name,"/forecast.ss_new"))
SS_writeforecast(forecast.file,paste0("Scenarios/",input$Scenario_name),overwrite=TRUE)
}
########
#Run Stock Synthesis and plot output
show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[2],text="Model run in progress")
#Translate the UI's data-weighting label into the r4ss tune_comps option code
if(input$Data_wt=="None"){DataWT_opt<-"none"}
if(input$Data_wt=="Dirichlet"){DataWT_opt<-"DM"}
if(input$Data_wt=="Francis"){DataWT_opt<-"Francis"}
if(input$Data_wt=="McAllister-Ianelli"){DataWT_opt<-"MI"}
#Run SS. When the hessian toggle is absent from the UI, first attempt a full
#run (with hessian); if that produced no data_echo.ss_new, retry with -nohess.
if(is.null(input$no_hess)){
cmd.in<-""
if(input$add_comms==TRUE){cmd.in=paste0(" ",input$add_comms_in)}
RUN.SS(paste0("Scenarios/",input$Scenario_name),ss.cmd=cmd.in,OS.in=input$OS_choice)
#BUG FIX: the path here was missing the "/" separator
#("Scenarios/<name>data_echo.ss_new"), so this check never found the file and
#the -nohess retry ran unconditionally. Compare the checks further below.
if(!file.exists(paste0("Scenarios/",input$Scenario_name,"/data_echo.ss_new")))
{
cmd.in<-" -nohess"
if(input$add_comms==TRUE){cmd.in=paste0(" -nohess ",input$add_comms_in)}
RUN.SS(paste0("Scenarios/",input$Scenario_name),ss.cmd=cmd.in,OS.in=input$OS_choice)
}
}
#Hessian toggle present: honor the user's choice directly
if(!is.null(input$no_hess))
{
if(input$no_hess)
{
cmd.in<-" -nohess"
if(input$add_comms==TRUE){cmd.in=paste0(" -nohess ",input$add_comms_in)}
RUN.SS(paste0("Scenarios/",input$Scenario_name),ss.cmd=cmd.in,OS.in=input$OS_choice)
}
if(!input$no_hess)
{
cmd.in<-""
if(input$add_comms==TRUE){cmd.in=paste0(" ",input$add_comms_in)}
RUN.SS(paste0("Scenarios/",input$Scenario_name),ss.cmd=cmd.in,OS.in=input$OS_choice)
}
}
if(file.exists(paste0("Scenarios/",input$Scenario_name,"/data_echo.ss_new")))
{
#Read the SS report; if reading with covariance fails (e.g., no hessian was
#produced), fall back to reading without the covariance file.
Model.output<-try(SS_output(paste0("Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE))
#inherits() is the documented way to detect a try() failure; class(x)=="try-error"
#misbehaves (length > 1 comparison) when the object carries multiple classes.
if(inherits(Model.output,"try-error"))
{
Model.output<-SS_output(paste0("Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE,covar=FALSE)
}
#Iterative data weighting (3 tuning iterations); re-read output afterwards.
#When no covariance is available, tune with -nohess and read without covar.
if(input$Data_wt!="None")
{
if(Model.output$inputs$covar==TRUE)
{
tune_comps(Model.output,dir=paste0("Scenarios/",input$Scenario_name),niters_tuning=3,option=DataWT_opt,show_in_console = TRUE,verbose=FALSE)
Model.output<-try(SS_output(paste0("Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE))
}
if(Model.output$inputs$covar==FALSE)
{
tune_comps(Model.output,dir=paste0("Scenarios/",input$Scenario_name),option=DataWT_opt,niters_tuning=3,extras = " -nohess",show_in_console = TRUE,verbose=FALSE)
Model.output<-SS_output(paste0("Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE,covar=FALSE)
}
}
#Re-read the (possibly re-tuned) data file for end-year info used in plots
data.file<-SS_readdat(paste0("Scenarios/",input$Scenario_name,"/data_echo.ss_new"))
#No plots or figures
#Plot/table toggles: absent toggle => make them; present toggle => obey it
if(is.null(input$no_plots_tables))
{
show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[4],text="Making plots")
SS_plots(Model.output,maxyr=data.file$endyr+1,verbose=FALSE)
}
if(is.null(input$no_tables))
{
show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[5],text="Making tables")
try(SSexecutivesummary(Model.output))
}
if(!is.null(input$no_plots_tables)){
if(input$no_plots_tables==FALSE)
{
#Make SS plots
show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[4],text="Making plots")
SS_plots(Model.output,maxyr=data.file$endyr+1,verbose=FALSE)
}
}
if(!is.null(input$no_tables)){
if(input$no_tables==FALSE)
{
#Make SS tables
show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[5],text="Making tables")
try(SSexecutivesummary(Model.output))
}
}
#Run multiple jitters
#Run multiple jitters
#Jitter analysis: perturb starting values Njitter times, find the best fit,
#plot likelihood spread, then re-run the best model from its par file.
if(input$jitter_choice)
{
if(input$Njitter>0)
{
show_modal_spinner(spin="flower",color=wes_palettes$Moonrise1[1],text="Run jitters")
#file.copy(paste0("Scenarios/",input$Scenario_name,"/ss.exe"),paste0("Scenarios/",input$Scenario_name,"/ss_copy.exe"),overwrite = FALSE)
jits<-jitter(
dir=paste0(getwd(),"/Scenarios/",input$Scenario_name),
Njitter=input$Njitter,
printlikes = TRUE,
jitter_fraction=input$jitter_fraction,
init_values_src=0,
verbose=FALSE,
extras = "-nohess"
)
#Collect run 0 (base) plus all jitter runs and summarize likelihoods
profilemodels <- SSgetoutput(dirvec=paste0("Scenarios/",input$Scenario_name), keyvec=0:input$Njitter, getcovar=FALSE)
profilesummary <- SSsummarize(profilemodels)
minlikes<-profilesummary$likelihoods[1,-length(profilesummary$likelihoods)]==min(profilesummary$likelihoods[1,-length(profilesummary$likelihoods)])
#Find best fit model
index.minlikes<-c(1:length(minlikes))[minlikes]
jitter.likes<-profilesummary$likelihoods[1,-length(profilesummary$likelihoods)]
ref.like<-min(jitter.likes,na.rm = TRUE)
#Make plot and save to folder
main.dir<-getwd()
if(!file.exists(paste0("Scenarios/",input$Scenario_name,"/Jitter Results")))
{
dir.create(paste0("Scenarios/",input$Scenario_name,"/Jitter Results"))
}
#Working directory is changed here and restored via setwd(main.dir) below
setwd(paste0("Scenarios/",input$Scenario_name,"/Jitter Results"))
png("jitterplot.png")
#Blue points = all runs; red overlay = runs worse than the best likelihood
jitterplot<-plot(c(1:length(jitter.likes)),jitter.likes,type="p",col="black",bg="blue",pch=21,xlab="Jitter run",ylab="-log likelihood value",cex=1.25)
points(c(1:length(jitter.likes))[jitter.likes>ref.like],jitter.likes[jitter.likes>ref.like],type="p",col="black",bg="red",pch=21,cex=1.25)
abline(h=ref.like)
# likebc<-round((length(jitter.likes[ref.like==jitter.likes])/(input$Njitter+1))*100,0)
# likelessbc<-round((length(jitter.likes[ref.like>jitter.likes])/(input$Njitter+1))*100,0)
# like10<-round((length(jitter.likes[(ref.like+10)<jitter.likes])/(input$Njitter+1))*100,0)
# like2<-round(((length(jitter.likes[(ref.like+2)>jitter.likes])-(length(jitter.likes[ref.like==jitter.likes])))/(input$Njitter+1))*100,0)
# like_2_10<-round(100-(likebc+like10+like2),0)
# legend("topright",c(paste(" ",likelessbc,"% < BC",sep=""),paste(likebc,"% = BC",sep=""),paste(like2,"% < BC+2",sep=""),paste(like_2_10,"% > BC+2 & < BC+10",sep=""),paste(like10,"% > BC+10",sep="")),bty="n")
dev.off()
save(profilesummary,file=paste0("jitter_summary.DMP"))
#Comparison plots: subplot 1 = spawning output, subplot 3 = relative status
SSplotComparisons(profilesummary, legendlabels = c(0:input$Njitter), ylimAdj = 1.30, subplot = c(1), new = FALSE,print=TRUE,plotdir=getwd())
SSplotComparisons(profilesummary, legendlabels = c(0:input$Njitter), ylimAdj = 1.30, subplot = c(3), new = FALSE,print=TRUE,plotdir=getwd())
#In-app version of the jitter likelihood plot
output$Jitterplot<-renderPlot({
# if(input$Njitter==1){return(NULL)}
# if(input$Njitter>1)
# {
#jitter.likes<-profilesummary$likelihoods[1,-length(profilesummary$likelihoods)]
#ref.like<-min(jitter.likes)
jitterplot<-plot(c(1:length(jitter.likes)),jitter.likes,type="p",col="black",bg="blue",pch=21,xlab="Jitter run",ylab="-log likelihood value",cex=1.25)
points(c(1:length(jitter.likes))[jitter.likes>ref.like],jitter.likes[jitter.likes>ref.like],type="p",col="black",bg="red",pch=21,cex=1.25)
abline(h=ref.like)
# likebc<-round((length(jitter.likes[ref.like==jitter.likes])/(input$Njitter+1))*100,0)
# likelessbc<-round((length(jitter.likes[ref.like>jitter.likes])/(input$Njitter+1))*100,0)
# like10<-round((length(jitter.likes[(ref.like+10)<jitter.likes])/(input$Njitter+1))*100,0)
# like2<-round(((length(jitter.likes[(ref.like+2)>jitter.likes])-(length(jitter.likes[ref.like==jitter.likes])))/(input$Njitter+1))*100,0)
# like_2_10<-round(100-(likebc+like10+like2),0)
# legend("topright",c(paste(" ",likelessbc,"% < BC",sep=""),paste(likebc,"% = BC",sep=""),paste(like2,"% < BC+2",sep=""),paste(like_2_10,"% > BC+2 & < BC+10",sep=""),paste(like10,"% > BC+10",sep="")),bty="n")
# }
})
#Spawning output comp
output$Jittercompplot1<-renderPlot({
SSplotComparisons(profilesummary, legendlabels = c(0:input$Njitter), ylimAdj = 1.30, subplot = c(1), new = FALSE)
})
#Relative stock status comp
output$Jittercompplot2<-renderPlot({
SSplotComparisons(profilesummary, legendlabels = c(0:input$Njitter), ylimAdj = 1.30, subplot = c(3), new = FALSE)
})
#R-run to get new best fit model
#Copy the best run's par file over ss.par and restart from it (no jitter)
show_modal_spinner(spin="flower",color=wes_palettes$Moonrise1[2],text="Re-run best model post-jitters")
file.copy(paste0(main.dir,"/Scenarios/",input$Scenario_name,"/ss.par_",(index.minlikes[1]-1),".sso"),paste0(main.dir,"/Scenarios/",input$Scenario_name,"/ss.par"),overwrite = TRUE)
#file.rename(paste0("Scenarios/",input$Scenario_name,"/ss_copy.exe"),paste0("Scenarios/",input$Scenario_name,"/ss.exe"),overwrite = FALSE)
starter.file$init_values_src<-1
starter.file$jitter_fraction<-0
SS_writestarter(starter.file,paste0(main.dir,"/Scenarios/",input$Scenario_name),overwrite=TRUE)
RUN.SS(paste0(main.dir,"/Scenarios/",input$Scenario_name),ss.cmd="",OS.in=input$OS_choice)
#NOTE(review): inherits(Model.output,"try-error") would be the robust check here
Model.output<-try(SS_output(paste0(main.dir,"/Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE))
if(class(Model.output)=="try-error")
{
Model.output<-SS_output(paste0(main.dir,"/Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE,covar=FALSE)
}
show_modal_spinner(spin="flower",color=wes_palettes$Moonrise1[3],text="Making plots")
SS_plots(Model.output,maxyr=data.file$endyr+1,verbose=FALSE)
show_modal_spinner(spin="flower",color=wes_palettes$Moonrise1[4],text="Making tables")
try(SSexecutivesummary(Model.output))
}
#NOTE(review): main.dir is only defined inside the Njitter>0 branch above, yet
#this setwd() runs whenever jitter_choice is TRUE - confirm Njitter<=0 is impossible
setwd(main.dir)
}
#Add retro runs
# if(input$Retro_choice){
# mydir<-paste0(getwd(),"/Scenarios/")
# model_settings = get_settings(settings = list(base_name = input$Scenario_name,
# run = "retro",
# retro_yrs = input$first_retro_year:input$final_retro_year))
# # tryCatch({
# run_diagnostics(mydir = mydir, model_settings = model_settings)
# # },
# # warning = function(warn){
# # showNotification(paste0(warn), type = 'warning')
# # },
# # error = function(err){
# # showNotification(paste0(err), type = 'err')
# # })
# }
#Convergence diagnostics
#Convergence diagnostics rendered into the results tab
output$converge.grad <- renderText({
max.grad<-paste0("Maximum gradient: ",Model.output$maximum_gradient_component)
})
output$converge.covar <- renderText({
covar<-paste0("Was covariance file created? ",Model.output$inputs$covar)
})
#Simple convergence verdict: small gradient AND a covariance file
output$converge.dec <- renderText({
if(Model.output$maximum_gradient_component<0.1 & Model.output$inputs$covar==TRUE)
{converge.dec<-"Model appears converged. Please check outputs for nonsense."}
else{converge.dec<-"Model may not have converged or inputs are missing. Please use the Jitter option or check/change starting values before re-running model."}
})
#Relative biomass
#Derived-quantity table: relative SB, SPR ratio, and next-year OFL/ABC
output$SSout_relSB_table <- renderTable({
SB_indices<-c(which(rownames(Model.output$derived_quants)==paste0("Bratio_",input$endyr)),
which(rownames(Model.output$derived_quants)=="B_MSY/SSB_unfished"),
which(rownames(Model.output$derived_quants)==paste0("SPRratio_",input$endyr)),
which(rownames(Model.output$derived_quants)==paste0("OFLCatch_",(input$endyr+1))),
which(rownames(Model.output$derived_quants)==paste0("ForeCatch_",(input$endyr+1)))
)
Output_relSB_table<-data.frame(Model.output$derived_quants[SB_indices,1:3])
# Label=c(paste0("SO",input$endyr+1,"/SO_0"),
# "SO_MSY/SO_0",
# paste0("SPR",input$endyr+1),
# paste0("OFL",(input$endyr+1)),
# paste0("ABC",(input$endyr+1))
# ))
#Replace the r4ss labels with display-friendly ones (column 1)
Output_relSB_table[,1]<-c(paste0("SO",input$endyr,"/SO_0"),
"SO_MSY/SO_0",
paste0("1-SPR",input$endyr),
paste0("OFL",(input$endyr+1)),
paste0("ABC",(input$endyr+1))
)
Output_relSB_table
# rownames=c(expression(SO[input$endyr]/SO[0]),
# expression(SO[MSY]/SO[0]),
# expression(SPR[input$endyr]),
# expression(OFL[input$endyr]),
# expression(ABC[input$endyr])
# ))
# Output_relSB_table[,1]<-c(expression('B',[input$endyr],'/B',[0]),
# expression('B'[MSY]/'B'[0]),
# expression('SPR'[input$endyr]),
# expression('OFL'[input$endyr]),
# expression('ABC'[input$endyr])
# )
})
#F estimate and relative to FMSY and proxies
output$SSout_F_table <- renderTable({
F_indices<-c(which(rownames(Model.output$derived_quants)==paste0("F_",input$endyr)),
which(rownames(Model.output$derived_quants)=="annF_Btgt"),
which(rownames(Model.output$derived_quants)=="annF_SPR"),
which(rownames(Model.output$derived_quants)=="annF_MSY")
)
F_values<-Model.output$derived_quants[F_indices,1:3]
})
#Time series output
output$SSout_table <- renderTable({
# Output_table<-Model.output$sprseries[-nrow(Model.output$sprseries),c(1,5,6,7,8,9,11,12,13,25,37)]
#Selected columns of the SPR series (year, biomass, catch, SPR metrics, ...)
Output_table<-Model.output$sprseries[,c(1,5,6,7,8,9,11,12,13,25,37)]
})
#Paramters
#Estimated (non-deviation) parameters with their labels as the first column
output$Parameters_table <- renderTable({
cbind(rownames(Model.output$estimated_non_dev_parameters),Model.output$estimated_non_dev_parameters)
})
}
# Warn the user when SS never produced its echoed data file (run failed or
# hessian did not invert); the alert text suggests likely causes and fixes.
if (!file.exists(paste0("Scenarios/", input$Scenario_name, "/data_echo.ss_new"))) {
  warn.text <- "Model did not run or Hessian did not invert. Double check data files for errors and each input for missing values (or for 0 SD for lognormal priors) and/or re-run model using a different model specification (e.g., starting values)."
  sendSweetAlert(
    session = session,
    title = "Model Warning",
    text = warn.text,
    type = "warning"
  )
}
remove_modal_spinner()
#Switch the UI to the results tab once model output exists.
#NOTE(review): exists("Model.output") is evaluated once when this observer is
#created, so it does not reactively track the object - confirm intended.
observeEvent(exists("Model.output"), {
updateTabsetPanel(session, "tabs",
selected = '2')
})
}
})
###############################################################
### Likelihood profiles, Sensitivities, and Ensemble models ###
###############################################################
#System volume roots used by all shinyFiles directory choosers below
roots <- getVolumes()()
#
#Directory the user picked for copying model output out of the app workspace
pathModelout <- reactive({
shinyDirChoose(input, "Modelout_dir", roots= roots,session=session, filetypes=c('', 'txt'))
return(parseDirPath(roots, input$Modelout_dir))
})
#When the results tab is visited and a destination was chosen, copy the
#scenario folder (and retro results, if run) to that destination
observeEvent(as.numeric(input$tabs)==2,{
#observeEvent(exists("Model.output"),{
pathModelout.dir <-pathModelout()
if(!identical(pathModelout.dir, character(0)))
{
#dir.create(paste0(pathModelout.dir,"/Scenarios"))
file.copy(paste0("Scenarios/",input$Scenario_name), pathModelout.dir,recursive=TRUE,overwrite=TRUE)
if(input$Retro_choice){file.copy(paste0("Scenarios/",input$Scenario_name,"_retro"), pathModelout.dir,recursive=TRUE,overwrite=TRUE)}
}
})
########################
### Model efficiency ###
########################
#Directory chooser for the model-efficiency (MCMC) tools
shinyDirChoose(input,"ModEff_dir", roots=roots,session=session, filetypes=c('', 'txt'))
pathRetro <- reactive({
return(parseDirPath(roots, input$ModEff_dir))
})
# if(exists("ModEff_dir")){print(ModEff_dir)}
# observeEvent(as.numeric(input$tabs)==12,{
# output$ModEff_model_pick<-renderUI({
# pickerInput(
# inputId = "myModEff",
# label = "Choose model to evaluate",
# choices = list.files(pathModEff()),
# options = list(
# `actions-box` = TRUE,
# size = 12,
# `selected-text-format` = "count > 3"
# ),
# multiple = TRUE
# )
# })
# })
#MCMC model efficiency run (adnuts): copy the chosen model into a working
#folder, optionally pre-optimize, sample via RWM or NUTS, then render summary
#and pairs plots.
observeEvent(req(input$run_adnuts),{
modeff.mod.dir<-parseDirPath(roots, input$ModEff_dir) #pathModEff()
modeff.dir<-dirname(modeff.mod.dir)
modeff.name<-paste0(basename(modeff.mod.dir),"_",input$ModEff_choice)
#Create a per-method working copy of the model folder (only once)
if(dir.exists(file.path(modeff.dir,modeff.name))==FALSE)
{
dir.create(file.path(modeff.dir,modeff.name))
file.copy(list.files(modeff.mod.dir,full.names=TRUE),to=file.path(modeff.dir,modeff.name),recursive=TRUE,overwrite=TRUE)
}
#optimize model
#Optional short MCMC-mode optimization run to prepare hbf files
if(input$Opt_mod==TRUE)
{
show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[1],text=paste0("Run initial optimization?"))
RUN.SS(file.path(modeff.dir,modeff.name),ss.cmd="/ss -nox -mcmc 100 -hbf",OS.in=input$OS_choice)
remove_modal_spinner()
}
#Set mcmc model
show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[1],text=paste0("Run ",input$ModEff_choice," model"))
#Leave one core free; warmup is fixed at 25% of the requested iterations
chains <- parallel::detectCores()-1
m<-"ss"
p<-file.path(modeff.dir,modeff.name)
#Run MCMC model with either rwm or nuts
if(input$ModEff_choice=="RWM")
{
fit_model<- sample_rwm(model=m, path=p, iter=input$iter, warmup=0.25*input$iter,
chains=chains, thin=input$thin, duration=NULL)
}
if (input$ModEff_choice=="Nuts")
{
fit_model <- sample_nuts(model=m, path=p, iter=input$iter, warmup=0.25*input$iter,
chains=4, cores=4,control=list(metric='mle', max_treedepth=5),mceval=TRUE)
}
#Capture the text summary for display in the app
fit.mod.summary<-utils::capture.output(summary(fit_model), file=NULL)
output$fit.model.summary <- renderText({
#paste0(fit.mod.summary[1],fit.mod.summary[2],fit.mod.summary[3])
fit.mod.summary
})
#Pairs plots for (up to) the 10 slowest- and fastest-mixing parameters
parmax<-10
if(length(fit_model$par_names)<10){parmax<-length(fit_model$par_names)}
png(paste0(p,"/pairs_plot_slow.png"),width=600, height=350)
pairs_admb(fit_model, pars=1:parmax, order='slow')
dev.off()
png(paste0(p,"/pairs_plot_fast.png"),width=600, height=350)
pairs_admb(fit_model, pars=1:parmax, order='fast')
dev.off()
output$pairs_slow <- renderImage({
#region image.path1<-normalizePath(paste0(p,"/pairs_plot_fast.png"),mustWork=FALSE)
return(list(
src = paste0(p,"/pairs_plot_slow.png"),
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
output$pairs_fast <- renderImage({
#region image.path1<-normalizePath(paste0(p,"/pairs_plot_fast.png"),mustWork=FALSE)
return(list(
src = paste0(p,"/pairs_plot_fast.png"),
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
#Persist the fit object next to the model for later inspection
save(fit_model,file=paste0(p,"/fit_model.RData"))
remove_modal_spinner()
#if(input$run_stanout==TRUE){launch_shinyadmb(fit_model)}
})
###########################
### Likelihood profiles ###
###########################
#Directory the user picked for the likelihood-profile model
pathLP <- reactive({
shinyDirChoose(input, "LP_dir", roots=roots,session=session, filetypes=c('', 'txt'))
return(parseDirPath(roots, input$LP_dir))
})
#Populate the profile-parameter picker when the profiles tab is visited
observeEvent(as.numeric(input$tabs)==4,{
pathLP.dir <-pathLP()
output$LikeProf_model_picks<-renderUI({
pickerInput(
inputId = "myPicker_LP",
label = "Choose parameters to profile over",
choices = c("Steepness","lnR0","Natural mortality female","Linf female","k female", "CV@Lt young female","CV@Lt old female","Natural mortality male","Linf male","k male", "CV@Lt young male", "CV@Lt old male"),
options = list(
`actions-box` = TRUE,
size = 12,
`selected-text-format` = "count > 3"
),
multiple = TRUE
)
})
})
#Likelihood profiles: map the UI labels to SS parameter names, build profile
#settings, run the diagnostics, and render the resulting figures.
observeEvent(input$run_Profiles,{
show_modal_spinner(spin="flower",color=wes_palettes$Darjeeling1[1],text="Profiles running")
starter.file<-SS_readstarter(paste0(pathLP(),"/starter.ss"))
#data.file<-SS_readdat(paste0(pathLP(),"/data_echo.ss_new"))
#ctl.file<-SS_readctl(paste0(pathLP(),"/control.ss_new"),use_datlist = TRUE, datlist=data.file)
rep.parms<-SS_output(pathLP(),covar=FALSE,verbose=FALSE)
rep.parms.names<-rownames(rep.parms$parameters)
# SS_parm_names<-c("SR_BH_steep", "SR_LN(R0)","NatM_p_1_Fem_GP_1","L_at_Amax_Fem_GP_1","VonBert_K_Fem_GP_1","CV_young_Fem_GP_1","CV_old_Fem_GP_1","NatM_p_1_Mal_GP_1","L_at_Amax_Mal_GP_1","VonBert_K_Mal_GP_1","CV_young_Mal_GP_1","CV_old_Mal_GP_1")
#SS_parm_names<-c(rownames(ctl.file$SR_parms)[2], rownames(ctl.file$SR_parms)[1],rownames(ctl.file$MG_parms)[1],rownames(ctl.file$MG_parms)[3],rownames(ctl.file$MG_parms)[4],rownames(ctl.file$MG_parms)[5],rownames(ctl.file$MG_parms)[6],rownames(ctl.file$MG_parms)[13],rownames(ctl.file$MG_parms)[15],rownames(ctl.file$MG_parms)[16],rownames(ctl.file$MG_parms)[17],rownames(ctl.file$MG_parms)[18])
#NOTE(review): relies on fixed row positions in the parameter table; assumes a
#two-sex model with the standard parameter ordering - TODO confirm
SS_parm_names<-c(rep.parms.names[24], rep.parms.names[23],rep.parms.names[1],rep.parms.names[3],rep.parms.names[4],rep.parms.names[5],rep.parms.names[6],rep.parms.names[13],rep.parms.names[15],rep.parms.names[16],rep.parms.names[17],rep.parms.names[18])
parmnames<-input$myPicker_LP
parmnames_vec<-c("Steepness","lnR0","Natural mortality female","Linf female","k female", "CV@Lt young female","CV@Lt old female","Natural mortality male","Linf male","k male", "CV@Lt young male", "CV@Lt old male")
#Translate the UI selections into the corresponding SS parameter names
prof_parms_names<-SS_parm_names[parmnames_vec%in%parmnames]
#Carry the starter file's prior-likelihood setting into the profile settings
prior_like<-starter.file$prior_like
use_prior_like_in<-rep(0,length(prof_parms_names))
if(prior_like==1){use_prior_like_in = rep(1,length(prof_parms_names))}
mydir = dirname(pathLP())
#Profile ranges are comma-separated UI strings, one value per parameter
get = get_settings_profile( parameters = prof_parms_names,
low = as.numeric(trimws(unlist(strsplit(input$Prof_Low_val,",")))),
high = as.numeric(trimws(unlist(strsplit(input$Prof_Hi_val,",")))),
step_size = as.numeric(trimws(unlist(strsplit(input$Prof_step,",")))),
param_space = rep('real',length(as.numeric(trimws(unlist(strsplit(input$Prof_Low_val,",")))))),
use_prior_like = use_prior_like_in
)
model_settings = get_settings(settings = list(base_name = basename(pathLP()),
run = "profile",
profile_details = get))
try(run_diagnostics(mydir = mydir, model_settings = model_settings))
file.remove(paste0(dirname(mydir),"/run_diag_warning.txt"))
#Render the diagnostic figures for the first profiled parameter
output$LikeProf_plot_modout <- renderImage({
image.path1<-normalizePath(file.path(paste0(pathLP(),"_profile_",prof_parms_names[1],"/parameter_panel_",prof_parms_names[1],".png")),mustWork=FALSE)
return(list(
src = image.path1,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
output$LikeProf_plot_Piner <- renderImage({
image.path2<-normalizePath(file.path(paste0(pathLP(),"_profile_",prof_parms_names[1],"/piner_panel_",prof_parms_names[1],".png")),mustWork=FALSE)
return(list(
src = image.path2,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
output$LikeProf_plot_SO <- renderImage({
image.path3<-normalizePath(file.path(paste0(pathLP(),"_profile_",prof_parms_names[1],"/",prof_parms_names[1],"_trajectories_compare1_spawnbio.png")),mustWork=FALSE)
return(list(
src = image.path3,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
output$LikeProf_plot_SOt_SO0 <- renderImage({
image.path4<-normalizePath(file.path(paste0(pathLP(),"_profile_",prof_parms_names[1],"/",prof_parms_names[1],"_trajectories_compare3_Bratio.png")),mustWork=FALSE)
return(list(
src = image.path4,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
remove_modal_spinner()
})
#Multi-parameter likelihood profile: profile two (or more) parameters jointly
#over user-supplied value combinations read from an uploaded CSV.
observeEvent(input$run_MultiProfiles,{
show_modal_spinner(spin="flower",color=wes_palettes$Darjeeling1[2],text="Multi-profiles running")
refdir<-pathLP()
mydir <- dirname(refdir)
#Read in reference model
ref.model<-SS_output(refdir)
#Read in parameter files
#Uploaded file: one column per profiled parameter; fall back to semicolon-
#separated CSV when the first line contains ";"
par.df <- fread(input$file_multi_profile$datapath,check.names=FALSE,data.table=FALSE)
L <- readLines(input$file_multi_profile$datapath, n = 1)
if(grepl(";", L)) {par.df <- read.csv2(input$file_multi_profile$datapath,check.names=FALSE)}
#NOTE(review): fixed row positions in the parameter table; assumes a two-sex
#model with the standard parameter ordering - TODO confirm
SS_parm_names<-rownames(ref.model$parameters)[c(23:24,1,3,4:6,13,15:18)]
parmnames_vec<-c("Steepness","lnR0","Natural mortality female","Linf female","k female", "CV@Lt young female","CV@Lt old female","Natural mortality male","Linf male","k male", "CV@Lt young male", "CV@Lt old male")
parmnames<-colnames(par.df)
prof_parms_names<-SS_parm_names[parmnames_vec%in%parmnames]
#One legend label per value combination, e.g. "Steepness 0.4;lnR0 8"
modelnames<-paste0(parmnames[1]," ",par.df[,1],";",parmnames[2]," ",par.df[,2])
#Make new folder
#para = rownames(model_settings$profile_details)[aa]
#Fresh working copy of the reference model for the profile runs
profile_dir <- paste0(refdir,"_profile_", paste(prof_parms_names,collapse="_"))
dir.create(profile_dir, showWarnings = FALSE)
if (length(list.files(profile_dir)) !=0)
{
remove <- list.files(profile_dir)
file.remove(file.path(profile_dir, remove))
}
all_files <- list.files(refdir)
file.copy(from = file.path(refdir,all_files), to = profile_dir, overwrite = TRUE)
#Set-up the starter file control file
starter.file<-SS_readstarter(paste0(profile_dir,"/starter.ss"))
starter.file$ctlfile<-"control_modified.ss"
starter.file$init_values_src<-0
starter.file$prior_like<-1
SS_writestarter(starter.file,profile_dir,overwrite=TRUE)
# low_in <- as.numeric(trimws(unlist(strsplit(input$Prof_Low_val,",")))),
# high_in <- as.numeric(trimws(unlist(strsplit(input$Prof_Hi_val,",")))),
# step_size_in <- as.numeric(trimws(unlist(strsplit(input$Prof_step,","))))
# par.df<-data.frame(mapply(function(x) seq(low[x],high[x],step_size[x]),x=1:length(low)))
# colnames(par.df)<-prof_parms_names
#Run the r4ss profile with or without hessian per the UI toggle.
#NOTE(review): masterctlfile was renamed oldctlfile in newer r4ss - confirm the
#pinned r4ss version still accepts this argument.
if(input$Hess_multi_like==FALSE)
{
profile <- profile(
dir = profile_dir, # directory
masterctlfile = "control.ss_new",
newctlfile = "control_modified.ss",
string = prof_parms_names,
profilevec = par.df,
extras = "-nohess",
prior_check=TRUE,
show_in_console = TRUE
)
}
if(input$Hess_multi_like==TRUE)
{
profile <- profile(
dir = profile_dir, # directory
masterctlfile = "control.ss_new",
newctlfile = "control_modified.ss",
string = prof_parms_names,
profilevec = par.df,
prior_check=TRUE,
show_in_console = TRUE
)
}
# get model output
profilemodels <- SSgetoutput(dirvec=profile_dir,keyvec=1:nrow(par.df), getcovar=FALSE)
n <- length(profilemodels)
profilesummary <- SSsummarize(profilemodels)
#NOTE(review): TRP_multi_like / LRP_multi_like appear to be missing the input$
#prefix (likely input$TRP_multi_like) unless defined elsewhere - verify.
try(SSplotComparisons(profilesummary, legendlabels = modelnames, ylimAdj = 1.30, new = FALSE,plot=FALSE,print=TRUE, legendloc = 'topleft',uncertainty=TRUE,plotdir=profile_dir,btarg=TRP_multi_like,minbthresh=LRP_multi_like))
save(profilesummary,file=paste0(profile_dir,"/multiprofile.DMP"))
# add total likelihood (row 1) to table created above
par.df$like <- as.numeric(profilesummary$likelihoods[1, 1:n])
par.df$likediff <- as.numeric(profilesummary$likelihoods[1, 1:n]-ref.model$likelihoods_used[1,1])
par.df$Bratio <- as.numeric(profilesummary$Bratio[grep((profilesummary$endyrs[1]),profilesummary$Bratio$Label), 1:n])
par.df$SB0 <- as.numeric(profilesummary$SpawnBio[1, 1:n])
par.df$SBcurrent <- as.numeric(profilesummary$SpawnBio[grep((profilesummary$endyrs[1]),profilesummary$SpawnBio$Label), 1:n])
SBcurrmax<-max(par.df$SBcurrent)
colnames(par.df)<-c(parmnames,c("Likelihood","Likelihood_difference",paste0("SB",profilesummary$endyrs[1],"/SB0"),"SB0",paste0("SB",profilesummary$endyrs[1])))
save(par.df,file=paste0(profile_dir,"/multiprofilelikelihoods.DMP"))
write.csv(par.df,file=paste0(profile_dir,"/multiprofilelikelihoods.csv"))
#This reactive object is needed to get the plots to work
plot.dat<-reactive({
plot.dat<-melt(par.df,id.vars=c( colnames(par.df)[1:2]),measure.vars=c("Likelihood_difference",paste0("SB",profilesummary$endyrs[1],"/SB0"),"SB0",paste0("SB",profilesummary$endyrs[1])))
plot.dat
})
#Axis-limit helper rows per facet and the reference-model overlay points
blank_data<- data.frame(variable = c("Likelihood_difference", "Likelihood_difference", paste0("SB",profilesummary$endyrs[1],"/SB0"), paste0("SB",profilesummary$endyrs[1],"/SB0"), "SB0", "SB0",paste0("SB",profilesummary$endyrs[1]),paste0("SB",profilesummary$endyrs[1])), x =min(par.df[,1]),y = c(min(par.df$Likelihood_difference),max(par.df$Likelihood_difference), 0, 1, 0, ceiling(max(par.df$SB0)),0,ceiling(SBcurrmax)))
blank_data$variable<-factor(blank_data$variable,c("Likelihood_difference",paste0("SB",profilesummary$endyrs[1],"/SB0"),"SB0",paste0("SB",profilesummary$endyrs[1])))
refmodel.dat<-data.frame(variable = c("Likelihood_difference",paste0("SB",profilesummary$endyrs[1],"/SB0"),"SB0",paste0("SB",profilesummary$endyrs[1])), x =ref.model$parameters[grep(prof_parms_names[1],ref.model$parameters$Label),3],y = c(0,ref.model$sprseries$Deplete[grep((profilesummary$endyrs[1]),profilesummary$Bratio$Label)+1],ref.model$SBzero,ref.model$derived_quants[grep((profilesummary$endyrs[1]),profilesummary$SpawnBio$Label),2]))
#multiprofplotfun<-function(plot.dat)
#{
output$LikeProf_multiplot <- renderPlot({
multiplot<-ggplot(plot.dat(),aes(plot.dat()[,1],value))+
geom_line(lwd=1.25)+
facet_wrap(~variable,scales="free_y")+
geom_blank(data = blank_data, aes(x = x, y = y,z="variable"))+
ylab("Difference in -log likelihood")+
scale_x_continuous(name = paste(parmnames[1],"and",parmnames[2]),
breaks =par.df[,1],
labels = paste0(par.df[,1],"\n",par.df[,2]))+
geom_hline(data = data.frame(yint=c(-1.96,0,1.96,0.4,0.25),variable=c("Likelihood_difference","Likelihood_difference","Likelihood_difference",paste0("SB",profilesummary$endyrs[1],"/SB0"),paste0("SB",profilesummary$endyrs[1],"/SB0"))),
aes(yintercept = yint), linetype = c("solid","dotted","solid","dotted","solid"),color=c("red","black","red","darkgreen","red"),lwd=1)+
geom_point(data=refmodel.dat,aes(x=x,y=y),color="blue",size=4)+
theme_bw()
ggsave(paste0(profile_dir,"/","multilikelihood_profile.png"),width=10,height=10,units="in")
multiplot
})
#}
# output$LikeProf_multiplot <- renderPlot({
# plotPNG(func=multiprofplotfun(plot.dat()),paste0(profile_dir,"/",paste(parmnames,collapse="_"),"_multilikelihood_profile.png"))
# })
# plot.dat2<-reactive({
# plot.dat2<-melt(par.df,id.vars=c( colnames(par.df)[1:2]),measure.vars=c("Likelihood_difference",paste0("SB",profilesummary$endyrs[1]-1,"/SB0"),"SB0",paste0("SB",profilesummary$endyrs[1]-1)))
# plot.dat2
# })
# png(file = paste0(profile_dir,"/","multilikelihood_profile.png"),width = 10, height = 10, units = "in", res = 300, pointsize = pt)
# # multiplot
# ggplot(plot.dat2(),aes(plot.dat2()[,1],value))+
# geom_line(lwd=1.25)+
# facet_wrap(~variable,scales="free_y")+
# #geom_blank(data = blank_data, aes(x = x, y = y,z="variable"))+
# ylab("Difference in -log likelihood")+
# #scale_x_continuous(name = paste(parmnames[1],"and",parmnames[2]),
# # breaks =par.df[,1],
# # labels = paste0(par.df[,1],"\n",par.df[,2]))+
# geom_hline(data = data.frame(yint=c(-1.96,0,1.96,0.4,0.25),variable=c("Likelihood_difference","Likelihood_difference","Likelihood_difference",paste0("SB",profilesummary$endyrs[1]-1,"/SB0"),paste0("SB",profilesummary$endyrs[1]-1,"/SB0"))),
# aes(yintercept = yint), linetype = c("solid","dotted","solid","dotted","solid"),color=c("red","black","red","darkgreen","red"),lwd=1)+
# #geom_point(data=refmodel.dat,aes(x=x,y=y),color="blue",size=3)+
# theme_bw() # multiprofplot
#dev.off()
# png(file = paste0(profile_dir,"/",paste(parmnames,collapse="_"),"_multilikelihood_profile.png"),width = 10, height = 10, units = "in", res = 300, pointsize = pt)
# output$LikeProf_multiplot <- renderImage({
# image.path<-normalizePath(file.path(paste0(profile_dir,paste0("\\",paste(parmnames,collapse="_"),"_multilikelihood_profile.png"))),mustWork=FALSE)
# return(list(
# src = image.path,
# contentType = "image/png",
# # width = 400,
# # height = 300,
# style='height:60vh'))
# },deleteFile=FALSE)
# reshape data frame into a matrix for use with contour
# pngfun(wd = mydir, file = paste0("contour_profile.png"), h = 7,w = 12)
# contour(x = as.numeric(rownames(like_matrix)),
# y = as.numeric(colnames(like_matrix)),
# z = like_matrix)
# dev.off()
# make contour plot
# output$LikeProf_multi_contour <- renderPlot({
# like_matrix <- reshape2::acast(par.df, colnames(par.df)[1]~colnames()[2], value.var="like")
# pngfun(wd = mydir, file = paste0("contour_profile.png"), h = 7,w = 12)
# contour(x = as.numeric(rownames(like_matrix)),
# y = as.numeric(colnames(like_matrix)),
# z = like_matrix)
# dev.off()
# })
remove_modal_spinner()
})
#################
###############################
####### Retrospectives ########
###############################
# Register the directory chooser for the retrospective model folder; the
# empty-string filetype allows folders containing any (or no) files.
shinyDirChoose(input,"Retro_dir", roots=roots,session=session, filetypes=c('', 'txt'))
# Reactive absolute path of the model directory the user selected above.
pathRetro <- reactive({
return(parseDirPath(roots, input$Retro_dir))
})
# Run retrospective analyses on the selected model when the button is pressed,
# then wire up the two comparison figures produced by the diagnostics runner.
observeEvent(input$run_Retro_comps,{
#if(input$run_Retro_comps){
# Busy spinner: retrospectives re-run the model several times and are slow.
show_modal_spinner(spin="flower",color=wes_palettes$Royal1[1],text="Running retrospectives")
# Split the chosen path into the parent directory and the scenario folder name.
mydir_in<-dirname(pathRetro())
scenario_in<-basename(pathRetro())
# Settings list for the diagnostics runner: which scenario, run type "retro",
# and the user-specified range of retrospective peel years.
model_settings = get_settings(settings = list(base_name = scenario_in,
run = "retro",
retro_yrs = input$first_retro_year_in:input$final_retro_year_in))
run_diagnostics(mydir = mydir_in, model_settings = model_settings)
# tryCatch({
# run_diagnostics(mydir = mydir_in, model_settings = model_settings)
# },
# warning = function(warn){
# showNotification(paste0(warn), type = 'warning')
# },
# error = function(err){
# showNotification(paste0(err), type = 'err')
# })
#}
# Spawning-biomass retrospective comparison figure written by run_diagnostics
# into the "<scenario>_retro" sibling folder.
output$Retro_comp_plotSB <- renderImage({
image.path<-normalizePath(file.path(paste0(pathRetro(),"_retro/compare2_spawnbio_uncertainty.png")),mustWork=FALSE)
return(list(
src = image.path,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
# Relative biomass (Bratio) retrospective comparison figure.
output$Retro_comp_plotBratio <- renderImage({
image.path<-normalizePath(file.path(paste0(pathRetro(),"_retro/compare4_Bratio_uncertainty.png")),mustWork=FALSE)
return(list(
src = image.path,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
remove_modal_spinner()
})
##############################
###############################
### Sensitivity comparisons ###
###############################
# Register the directory chooser for the sensitivity model folder once at
# server start-up.
# FIX: the original called shinyDirChoose() inside the reactive, which
# re-registers the chooser's observer every time the reactive is invalidated;
# hoisting it out matches the pattern already used for Retro_dir.
shinyDirChoose(input, "Sensi_dir", roots=roots,session=session, filetypes=c('', 'txt'))
# Reactive absolute path of the user-selected sensitivity model directory.
pathSensi <- reactive({
  return(parseDirPath(roots, input$Sensi_dir))
})
# Populate the reference-model picker when the sensitivity tab (tab 6) is
# visited.
# NOTE(review): observeEvent on a condition like tabs==6 triggers on any change
# of the expression's value, not only when it becomes TRUE — confirm intended.
observeEvent(as.numeric(input$tabs)==6,{
output$Sensi_model_Ref<-renderUI({
#dirinfo <- parseDirPath(roots, input$Sensi_dir)
# One folder per candidate model inside the chosen sensitivity directory.
pickerInput(
inputId = "myPicker_Ref",
label = "Choose reference model",
#choices = list.files(dirinfo),
choices = list.files(pathSensi()),
options = list(
`actions-box` = TRUE,
size = 12,
`selected-text-format` = "count > 3"
),
multiple = FALSE
)
})
})
# Once a reference model has been chosen, offer the remaining scenarios as
# multi-select comparison candidates (same folder listing as the reference
# picker).
observeEvent(!is.null(input$myPicker_Ref),{
# observeEvent(as.numeric(input$tabs)==6,{
output$Sensi_model_picks<-renderUI({
#dirinfo <- parseDirPath(roots, input$Sensi_dir)
pickerInput(
inputId = "myPicker",
label = "Choose scenarios to compare to reference model",
#choices = list.files(dirinfo),
choices = list.files(pathSensi()),
options = list(
`actions-box` = TRUE,
size = 12,
`selected-text-format` = "count > 3"
),
multiple = TRUE
)
})
})
#SS.comparisons<-observeEvent(as.numeric(input$tabs)==5,{
# Vector of model directories for the comparison run: the reference model
# first, followed by the selected sensitivity scenarios. Also creates the
# output folder for comparison plots on first use.
Sensi_model_dir_out<-eventReactive(req(input$run_Sensi_comps&!is.null(input$myPicker)&as.numeric(input$tabs)==6),{
if(!file.exists(paste0(pathSensi(),"/Sensitivity Comparison Plots")))
{
dir.create(paste0(pathSensi(),"/Sensitivity Comparison Plots"))
}
Sensi_model_dir_out_Ref<-paste0(pathSensi(),"/",input$myPicker_Ref)
Sensi_model_dir_sensi<-paste0(pathSensi(),"/",input$myPicker)
Sensi_model_dir<-c(Sensi_model_dir_out_Ref,Sensi_model_dir_sensi)
Sensi_model_dir
})
#&exists(Sensi_model_dir_out())
# Run model comparisons across the reference model and selected sensitivity
# scenarios: read each model, summarize, write comparison figures (with and
# without uncertainty), then build relative-error plots and display them.
observeEvent(req(input$run_Sensi_comps),{
show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[1],text="Comparisons running")
# Reference model first, then the comparison scenarios.
modelnames<-c(input$myPicker_Ref,input$myPicker)
zz<-list()
Runs<-length(Sensi_model_dir_out())
# Read each model's report files (r4ss::SS_output) into a list for summarizing.
for(i in 1:Runs) {zz[[i]]<-SS_output(paste0(Sensi_model_dir_out()[i]))}
modsummary.sensi<- SSsummarize(zz)
# One colour per model; 'shade' is a translucent version of the first colour.
col.vec = rc(n=length(modelnames), alpha = 1)
shade = adjustcolor(col.vec[1], alpha.f = 0.10)
# Target/limit reference points; default to 0 when the inputs are blank.
TRP.in<-input$Sensi_TRP
LRP.in<-input$Sensi_LRP
if(is.na(TRP.in)){TRP.in<-0}
if(is.na(LRP.in)){LRP.in<-0}
dir.create(paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file))
#Sensi_uncertainty_choice<-input$Sensi_uncertainty_choice
#if (all(is.na(quantsSD[, i]) | quantsSD[, i] == 0))
Sensi_uncertainty_choice<-TRUE
# Main comparison figure (with uncertainty): biomass panels + index fits.
pngfun(wd = paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file), file = paste0(input$Sensi_comp_file,".png"), h = 7,w = 12)
par(mfrow = c(1,3))
try(SSplotComparisons(modsummary.sensi, legendlabels = modelnames, ylimAdj = 1.30, subplot = c(2,4),col = col.vec, new = FALSE,btarg=TRP.in,minbthresh=LRP.in,uncertainty=Sensi_uncertainty_choice))
try(SSplotComparisons(modsummary.sensi, legendlabels = modelnames, ylimAdj = 1.30, subplot = 11,col = col.vec, new = FALSE, legendloc = 'topleft',btarg=TRP.in,minbthresh=LRP.in,uncertainty=Sensi_uncertainty_choice))
dev.off()
# Also write the complete set of comparison plots, and save the summary object.
try(SSplotComparisons(modsummary.sensi, legendlabels = modelnames, ylimAdj = 1.30,col = col.vec, new = FALSE,print=TRUE, legendloc = 'topleft',btarg=TRP.in,minbthresh=LRP.in,uncertainty=Sensi_uncertainty_choice,plotdir=paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file)))
save(modsummary.sensi,file=paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/",input$Sensi_comp_file,".DMP"))
# Fallback figure without uncertainty intervals (used when models lack SDs).
pngfun(wd = paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file), file = paste0(input$Sensi_comp_file,"_no_uncertainty.png"), h = 7,w = 12)
par(mfrow = c(1,3))
try(SSplotComparisons(modsummary.sensi, legendlabels = modelnames, ylimAdj = 1.30, subplot = c(1,3),col = col.vec, new = FALSE,btarg=TRP.in,minbthresh=LRP.in,uncertainty=Sensi_uncertainty_choice))
try(SSplotComparisons(modsummary.sensi, legendlabels = modelnames, ylimAdj = 1.30, subplot = 11,col = col.vec, new = FALSE, legendloc = 'topleft',btarg=TRP.in,minbthresh=LRP.in,uncertainty=Sensi_uncertainty_choice))
dev.off()
# Display whichever figure matches the data: if the first model reports no
# usable SDs, show the no-uncertainty version.
output$Sensi_comp_plot <- renderImage({
if (all(is.na(modsummary.sensi$quantsSD[, 1]) | modsummary.sensi$quantsSD[, 1] == 0))
{
image.path<-normalizePath(file.path(paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/",input$Sensi_comp_file, '_no_uncertainty.png')),mustWork=FALSE)
return(list(
src = image.path,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
}
else
{
image.path<-normalizePath(file.path(paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/",input$Sensi_comp_file, '.png')),mustWork=FALSE)
return(list(
src = image.path,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
}
},deleteFile=FALSE)
#Relative error sensitivity plots
# Parse comma-separated UI inputs controlling the relative-error figure layout.
SensiRE_breaks_in<-as.numeric(trimws(unlist(strsplit(input$SensiRE_breaks,","))))
SensiRE_xcenter_in<-as.numeric(trimws(unlist(strsplit(input$SensiRE_xcenter,","))))
SensiRE_ycenter_in<-as.numeric(trimws(unlist(strsplit(input$SensiRE_ycenter,","))))
SensiRE_headers_in<-trimws(unlist(strsplit(input$SensiRE_headers,",")))
# Same y-limits repeated for each of the five relative-error panels.
yminmax_sensi<-rep(c(input$SensiRE_ymin,input$SensiRE_ymax),5)
r4ss::SS_Sensi_plot(dir=paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/"),
model.summaries=modsummary.sensi,
current.year=modsummary.sensi$endyrs[1]+1,
mod.names=modelnames, #List the names of the sensitivity runs
#likelihood.out=c(0,0,0),
Sensi.RE.out="Sensi_RE_out.DMP", #Saved file of relative errors
CI=0.95, #Confidence interval box based on the reference model
TRP.in=input$Sensi_TRP, #Target relative abundance value
LRP.in=input$Sensi_LRP, #Limit relative abundance value
sensi_xlab="Sensitivity scenarios", #X-axis label
ylims.in=yminmax_sensi, #Y-axis label
plot.figs=c(1,1,1,1,1,1), #Which plots to make/save?
sensi.type.breaks=SensiRE_breaks_in, #vertical breaks that can separate out types of sensitivities
anno.x=SensiRE_xcenter_in, # Vertical positioning of the sensitivity types labels
anno.y=SensiRE_ycenter_in, # Horizontal positioning of the sensitivity types labels
anno.lab=SensiRE_headers_in #Sensitivity types labels
)
# Display the relative-error and log relative-error figures.
output$SensiRE_comp_plot <- renderImage({
image.path<-normalizePath(file.path(paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/Sensi_REplot_SB_Dep_F_MSY.png")),mustWork=FALSE)
return(list(
src = image.path,
contentType = "image/png",
width = 800,
height = 1200,
style='height:60vh'))
},deleteFile=FALSE)
output$SensiRElog_comp_plot <- renderImage({
image.path<-normalizePath(file.path(paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/Sensi_logREplot_SB_Dep_F_MSY.png")),mustWork=FALSE)
return(list(
src = image.path,
contentType = "image/png",
width = 400,
height = 300,
style='height:60vh'))
},deleteFile=FALSE)
remove_modal_spinner()
})
#############################
#############################
# image.path<-eventReactive(exists(file.path(paste0(path1(),"/Sensitivity Comparison Plots/",
# input$Sensi_comp_file, '.png'))),{
# image.path<-normalizePath(file.path(paste0(path1(),"/Sensitivity Comparison Plots/",
# input$Sensi_comp_file, '.png')),mustWork=FALSE)
# })
# output$Sensi_comp_plot <- renderImage({
# image.path<-normalizePath(file.path(paste0(path1(),"/Sensitivity Comparison Plots/",
# input$Sensi_comp_file, '.png')),mustWork=FALSE)
# return(list(
# src = image.path,
# contentType = "image/png",
# # width = 400,
# # height = 300,
# style='height:60vh'))
# print(input$run_Sensi_comps[1])
# },deleteFile=FALSE)
####################################
##########################
### Ensemble modelling ###
##########################
# Register the directory chooser for the ensemble models folder once at
# server start-up.
# FIX: the original registered the chooser inside the reactive (re-registering
# it on every invalidation) and omitted the session argument that the sibling
# shinyDirChoose() calls pass; both are corrected here.
shinyDirChoose(input, "Ensemble_dir", roots=roots, session=session, filetypes=c('', 'txt'))
# Reactive absolute path of the folder containing candidate ensemble models.
pathEnsemble <- reactive({
  return(parseDirPath(roots, input$Ensemble_dir))
})
#Used to have as.numeric(input$tabs)==4
# Populate the ensemble-scenario picker when the ensemble tab (tab 7) is
# visited; choices are the model folders inside the chosen directory.
observeEvent(as.numeric(input$tabs)==7,{
output$Ensemble_model_picks<-renderUI({
pickerInput(
inputId = "myEnsemble",
label = "Choose scenarios to ensemble",
choices = list.files(pathEnsemble()),
options = list(
`actions-box` = TRUE,
size = 12,
`selected-text-format` = "count > 3"
),
multiple = TRUE
)
})
})
#Ensemble_model_dir_out<-eventReactive(req(input$run_Ensemble&!is.null(input$myEnsemble)&as.numeric(input$tabs)==6),{
# Build a weighted model ensemble: for each selected model, draw normal samples
# of spawning output, relative stock status (Bratio), F, and 1-SPR using the
# model's estimates and SDs (number of draws proportional to the user weights),
# pool the draws across models, plot violin comparisons, and save the results.
observeEvent(req(input$run_Ensemble&!is.null(input$myEnsemble)&as.numeric(input$tabs)==7),{
  show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[1],text="Prepare models to combine into ensembles")
  # Create the output folder for this ensemble run.
  if(!file.exists(paste0(pathEnsemble(),"/Ensemble outputs")))
  {
    dir.create(paste0(pathEnsemble(),"/Ensemble outputs"))
  }
  Ensemble_model_dir_out<-paste0(pathEnsemble(),"/Ensemble outputs/",input$Ensemble_file)
  dir.create(Ensemble_model_dir_out)
  # Read and summarize each selected model.
  modelnames<-input$myEnsemble
  zz<-list()
  Runs<-length(input$myEnsemble)
  for(i in 1:Runs) {zz[[i]]<-SS_output(paste0(pathEnsemble(),"/",input$myEnsemble[i]))}
  modsummary.ensemble<- SSsummarize(zz)
  # Comma-separated model weights, normalized to sum to one; each model gets a
  # share of the 10000 total draws proportional to its weight.
  Ensemble_wts<-as.numeric(trimws(unlist(strsplit(input$Ensemble_wts,","))))
  Stand_ensemble_wts<-Ensemble_wts/sum(Ensemble_wts)
  Nsamps_ensemble<-10000
  Nsamps_ensemble_wts<-round(Nsamps_ensemble*Stand_ensemble_wts)
  #Calculate weighted values
  # NOTE: numdraws is read from the enclosing loop environment on each call.
  mean.fxn <- function(x, y) rnorm(numdraws, mean = x, sd = y)
  #Spawning outputs
  #Bratio
  SpOt_en<-Bratio_en<-F_en<-SPR_en<-list()
  # Seed data frames with an NA row; it is dropped with [-1,] after the loop.
  SO_0<-SO_t<-Bratio_t<-F_t<-SPR_t<-data.frame(Year=NA,Metric=NA,Model=NA)
  #Create weighted ensembles: per-model lists of per-year draw vectors.
  for (i in 1:length(Nsamps_ensemble_wts))
  {
    numdraws<-Nsamps_ensemble_wts[i]
    SpOt_en[[i]]<-Map(mean.fxn,modsummary.ensemble$SpawnBio[,i],modsummary.ensemble$SpawnBioSD[,i])
    names(SpOt_en[[i]])<-modsummary.ensemble$SpawnBio$Yr
    SO_0<-rbind(SO_0,data.frame(Year=as.numeric(names(SpOt_en[[i]][1])),Metric=unlist(SpOt_en[[i]][1]),Model=input$myEnsemble[i]))
    # FIX: the Metric index previously used length(Nsamps_ensemble_wts) (the
    # number of models) instead of the terminal-year row index used for Year.
    SO_t<-rbind(SO_t,data.frame(Year=names(SpOt_en[[i]][nrow(modsummary.ensemble$SpawnBio)]),Metric=unlist(SpOt_en[[i]][nrow(modsummary.ensemble$SpawnBio)]),Model=input$myEnsemble[i]))
    Bratio_en[[i]]<-Map(mean.fxn,modsummary.ensemble$Bratio[,i],modsummary.ensemble$BratioSD[,i])
    names(Bratio_en[[i]])<-modsummary.ensemble$Bratio$Yr
    Bratio_t<-rbind(Bratio_t,data.frame(Year=names(Bratio_en[[i]][nrow(modsummary.ensemble$Bratio)]),Metric=unlist(Bratio_en[[i]][nrow(modsummary.ensemble$Bratio)]),Model=input$myEnsemble[i]))
    F_en[[i]]<-Map(mean.fxn,modsummary.ensemble$Fvalue[,i],modsummary.ensemble$FvalueSD[,i])
    names(F_en[[i]])<-modsummary.ensemble$Fvalue$Yr
    F_t<-rbind(F_t,data.frame(Year=names(F_en[[i]][nrow(modsummary.ensemble$Fvalue)]),Metric=unlist(F_en[[i]][nrow(modsummary.ensemble$Fvalue)]),Model=input$myEnsemble[i]))
    SPR_en[[i]]<-Map(mean.fxn,modsummary.ensemble$SPRratio[,i],modsummary.ensemble$SPRratioSD[,i])
    names(SPR_en[[i]])<-modsummary.ensemble$SPRratio$Yr
    SPR_t<-rbind(SPR_t,data.frame(Year=names(SPR_en[[i]][nrow(modsummary.ensemble$SPRratio)]),Metric=unlist(SPR_en[[i]][nrow(modsummary.ensemble$SPRratio)]),Model=input$myEnsemble[i]))
  }
  #Reduce(intersect,list(names(list1),names(list2),names(list3))) # Code to find matches in multiple vectors. For future option of mixing models with different dimensions.
  #Assemble ensembles: concatenate draws across models, year by year.
  Ensemble_SO<-SpOt_en[[1]]
  Ensemble_Bratio<-Bratio_en[[1]]
  Ensemble_F<-F_en[[1]]
  Ensemble_SPR<-SPR_en[[1]]
  # FIX: guard the 2:length(...) loop, which indexed out of bounds (2:1) when
  # only a single model was selected.
  if (length(Nsamps_ensemble_wts) > 1)
  {
    for(ii in 2:length(Nsamps_ensemble_wts))
    {
      Ensemble_SO<-mapply(c,Ensemble_SO,SpOt_en[[ii]])
      Ensemble_Bratio<-mapply(c,Ensemble_Bratio,Bratio_en[[ii]])
      Ensemble_F<-mapply(c,Ensemble_F,F_en[[ii]])
      Ensemble_SPR<-mapply(c,Ensemble_SPR,SPR_en[[ii]])
    }
  }
  # Drop the NA seed row and append the pooled "Ensemble" draws to each table.
  SO_0<-rbind(SO_0[-1,],data.frame(Year=as.numeric(colnames(Ensemble_SO)[1]),Metric=Ensemble_SO[,1],Model="Ensemble"))
  SO_t<-rbind(SO_t[-1,],data.frame(Year=as.numeric(colnames(Ensemble_SO)[ncol(Ensemble_SO)]),Metric=Ensemble_SO[,ncol(Ensemble_SO)],Model="Ensemble"))
  Bratio_t<-rbind(Bratio_t[-1,],data.frame(Year=as.numeric(colnames(Ensemble_Bratio)[ncol(Ensemble_Bratio)]),Metric=Ensemble_Bratio[,ncol(Ensemble_Bratio)],Model="Ensemble"))
  F_t<-rbind(F_t[-1,],data.frame(Year=as.numeric(colnames(Ensemble_F)[ncol(Ensemble_F)]),Metric=Ensemble_F[,ncol(Ensemble_F)],Model="Ensemble"))
  SPR_t<-rbind(SPR_t[-1,],data.frame(Year=as.numeric(colnames(Ensemble_SPR)[ncol(Ensemble_SPR)]),Metric=Ensemble_SPR[,ncol(Ensemble_SPR)],Model="Ensemble"))
  SO_0$Year<-as.factor(SO_0$Year)
  SO_t$Year<-as.factor(SO_t$Year)
  Bratio_t$Year<-as.factor(Bratio_t$Year)
  F_t$Year<-as.factor(F_t$Year)
  SPR_t$Year<-as.factor(SPR_t$Year)
  show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[2],text="Preparing ensemble plots")
  #Violin plots comparing each model to the pooled ensemble.
  gg1<-ggplot(SO_0,aes(Model,Metric))+
    geom_violin()+
    ylab("Initial Spawning Output")
  gg2<-ggplot(SO_t,aes(Model,Metric))+
    geom_violin()+
    ylab("Terminal Year Spawning Output")
  gg3<-ggplot(Bratio_t,aes(Model,Metric))+
    geom_violin()+
    ylab("Relative stock status")
  gg4<-ggplot(F_t,aes(Model,Metric))+
    geom_violin()+
    ylab("Fishing mortality")
  gg5<-ggplot(SPR_t,aes(Model,Metric))+
    geom_violin()+
    ylab("1-SPR")
  ggarrange(gg1,gg2,gg3,gg4,gg5)
  ggsave(paste0(Ensemble_model_dir_out,"/Ensemble_comp_plots.png"))
  output$Ensemble_plots <- renderPlot({
    ggarrange(gg1,gg2,gg3,gg4,gg5)})
  #Spawning Output plot: ensemble draws by year.
  Ensemble_SO_plot<-reshape2::melt(Ensemble_SO,value.name="SO")
  colnames(Ensemble_SO_plot)[2]<-"Year"
  Ensemble_SO_plot$Year<-as.factor(Ensemble_SO_plot$Year)
  ggplot(Ensemble_SO_plot,aes(Year,SO,fill=Year))+
    geom_violin()+
    theme(legend.position="none")+
    theme(axis.text.x = element_text(angle = 45, hjust = 1,vjust=0.5,size=10))+
    ylab("Spawning Output")
  ggsave(paste0(Ensemble_model_dir_out,"/Ensemble_SO.png"))
  #Relative stock status plot
  Ensemble_Bratio_plot<-reshape2::melt(Ensemble_Bratio,value.name="Bratio")
  colnames(Ensemble_Bratio_plot)[2]<-"Year"
  Ensemble_Bratio_plot$Year<-as.factor(Ensemble_Bratio_plot$Year)
  ggplot(Ensemble_Bratio_plot,aes(Year,Bratio,fill=Year))+
    geom_violin()+
    theme(legend.position="none")+
    theme(axis.text.x = element_text(angle = 45, hjust = 1,vjust=0.5,size=10))+
    ylab("SBt/SO0")
  ggsave(paste0(Ensemble_model_dir_out,"/Ensemble_Bratio.png"))
  #F plot
  Ensemble_F_plot<-reshape2::melt(Ensemble_F,value.name="F")
  colnames(Ensemble_F_plot)[2]<-"Year"
  Ensemble_F_plot$Year<-as.factor(Ensemble_F_plot$Year)
  ggplot(Ensemble_F_plot,aes(Year,F,fill=Year))+
    geom_violin()+
    theme(legend.position="none")+
    theme(axis.text.x = element_text(angle = 45, hjust = 1,vjust=0.5,size=10))+
    ylab("Fishing mortality")
  ggsave(paste0(Ensemble_model_dir_out,"/Ensemble_F.png"))
  #1-SPR plot
  # FIX: this previously melted Ensemble_SO (copy-paste error), so the figure
  # labelled "1-SPR" actually showed spawning output draws.
  Ensemble_SPR_plot<-reshape2::melt(Ensemble_SPR,value.name="SPR")
  colnames(Ensemble_SPR_plot)[2]<-"Year"
  Ensemble_SPR_plot$Year<-as.factor(Ensemble_SPR_plot$Year)
  ggplot(Ensemble_SPR_plot,aes(Year,SPR,fill=Year))+
    geom_violin()+
    theme(legend.position="none")+
    theme(axis.text.x = element_text(angle = 45, hjust = 1,vjust=0.5,size=10))+
    ylab("1-SPR")
  ggsave(paste0(Ensemble_model_dir_out,"/Ensemble_SPR.png"))
  #Make outputs: save per-model draws, pooled ensembles, and plot data frames.
  show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[3],text="Saving ensemble objects")
  Model.outputs<-list("Spawning Output"=SpOt_en,"Relative Stock Status"=Bratio_en,"Fishing mortality"=F_en,"1-SPR"=SPR_en)
  Ensemble.outputs<-list("Spawning Output"=Ensemble_SO,"Relative Stock Status"=Ensemble_Bratio,"Fishing mortality"=Ensemble_F,"1-SPR"=Ensemble_SPR)
  Ensemble.outputs.plots<-list("Spawning Output"=Ensemble_SO_plot,"Relative Stock Status"=Ensemble_Bratio_plot,"Fishing mortality"=Ensemble_F_plot,"1-SPR"=Ensemble_SPR_plot)
  save(Model.outputs,file=paste0(Ensemble_model_dir_out,"/Model_results",".DMP"))
  save(Ensemble.outputs,file=paste0(Ensemble_model_dir_out,"/Ensemble_results",".DMP"))
  save(Ensemble.outputs.plots,file=paste0(Ensemble_model_dir_out,"/Ensemble_results_plots",".DMP"))
  remove_modal_spinner()
})
#})
#observeEvent(req(input$run_Ensemble&exists("Ensemble.outputs()")),{
#
# })
#Create figures of weighted values
# output$Sensi_comp_plot <- renderImage({
# image.path<-normalizePath(file.path(paste0(path1(),"/Sensitivity Comparison Plots/",
# input$Sensi_comp_file, '.png')),mustWork=FALSE)
# return(list(
# src = image.path,
# contentType = "image/png",
# # width = 400,
# # height = 300,
# style='height:60vh'))
# },deleteFile=FALSE)
})
|
#' Colour key
#'
#' Display a colour key based on the most recent trapping records for a given
#' grid and year.
#'
#' @param con Connection to KRSP database
#' @param grid character; grid to display colour key for.
#' @param year integer; year to display colour key for. Defaults to current year.
#'
#' @return A data frame allowing identification of squirrels by colours. In this
#' data frame there are three flags: \code{valid} indicates whether the
#' colours are valid (allowing for at most 2 colours per ear),
#' \code{duplicate} indicates whether that combination of colours is used on
#' multiple squirrels, and \code{standard} indicates whether a squirrel has
#' standard colouring, i.e. one colour in each ear and no mixing of wires,
#' pipes, and discs.
#'
#' @export
#' @examples
#' \dontrun{
#' con <- krsp_connect()
#' krsp_colours(con, grid = "KL", year = 2014)
#' }
# S3 generic; dispatch on the class of the database connection.
krsp_colours <- function(con, grid, year) {
  UseMethod("krsp_colours")
}
#' @export
krsp_colours.krsp <- function(con, grid, year = current_year()) {
  # Validate arguments before hitting the database.
  assert_that(inherits(con, "src_dbi"),
              valid_grid(grid, single = TRUE),
              valid_year(year, single = TRUE))
  year_arg <- as.integer(year)
  # Most recent trapping record per squirrel on this grid in the given year;
  # the litter LEFT JOIN flags squirrels belonging to a litter of that year.
  recent_query <- sprintf(
    "SELECT
t.id, t.squirrel_id, t.date, t.ft, s.sex,
t.taglft, t.tagrt,
t.color_left, t.color_right,
t.locx, t.locy,
(l.id IS NOT NULL) AS juvenile
FROM
trapping t
INNER JOIN squirrel s
ON t.squirrel_id = s.id
LEFT JOIN juvenile j
ON s.id = j.squirrel_id
LEFT JOIN litter l
ON j.litter_id = l.id
AND YEAR(COALESCE(l.fieldBDate, l.date1, l.tagDt)) = %i
WHERE
s.gr = '%s'
AND (t.squirrel_id, t.date) IN (
SELECT squirrel_id, MAX(date) as max_date
FROM trapping
WHERE YEAR(date) = %i
GROUP BY squirrel_id);", year_arg, grid, year_arg)
  recent <- krsp_sql(con, recent_query)
  # Tagged juveniles from this year's litters (they may never appear in
  # trapping records).
  juve_query <- sprintf(
    "SELECT
j.squirrel_id, l.tagDt AS date, j.sex,
j.taglft AS taglft, j.tagrt,
j.color_left, j.color_right,
l.nx2 AS locx, l.ny2 AS locy,
TRUE AS juvenile
FROM
litter l
INNER JOIN juvenile j
ON l.id = j.litter_id
WHERE
l.grid = '%s'
AND l.yr = %i
AND l.tagDt IS NOT NULL
AND (j.color_left IS NOT NULL OR j.color_right IS NOT NULL);",
    grid, year_arg)
  juveniles <- krsp_sql(con, juve_query)
  # Keep one (most recent, live) trapping row per squirrel.
  # NOTE: the deprecated dplyr underscore (SE) verbs are replaced throughout
  # with their standard NSE equivalents; behavior is unchanged.
  recent <- recent %>%
    arrange(desc(id)) %>%
    # remove dead squirrels
    filter(ft %in% 1:3) %>%
    group_by(squirrel_id) %>%
    filter(row_number() == 1) %>%
    ungroup() %>%
    select(-id, -ft)
  # Combine litter and trapping data, keeping whichever record is newer.
  trap_lit <- bind_rows(juveniles, recent) %>%
    group_by(squirrel_id) %>%
    arrange(desc(date)) %>%
    filter(row_number() == 1) %>%
    ungroup()
  # FIX: short-circuit || so nrow() is only consulted on an actual data frame
  # (with |, a non-data-frame would yield a zero-length condition and error).
  if (!is.data.frame(trap_lit) || nrow(trap_lit) == 0) {
    return(as.tbl(data.frame(
    )))
  }
  # Normalize colours/tags/locations; "-" marks a missing value.
  results <- trap_lit %>%
    mutate(
      juvenile = (juvenile == 1),
      color_left = toupper(color_left),
      # restore the two-letter codes mangled by toupper()
      color_left = gsub("BK", "Bk", color_left),
      color_left = gsub("GY", "Gy", color_left),
      color_left = ifelse(is.na(color_left) | color_left == "",
                          "-", color_left),
      color_right = toupper(color_right),
      color_right = gsub("BK", "Bk", color_right),
      color_right = gsub("GY", "Gy", color_right),
      color_right = ifelse(is.na(color_right) | color_right == "",
                           "-", color_right),
      taglft = ifelse(is.na(taglft) | taglft == "", "-", taglft),
      tagrt = ifelse(is.na(tagrt) | tagrt == "", "-", tagrt),
      locx = ifelse(is.na(locx) | locx == "", "-", locx),
      locy = ifelse(is.na(locy) | locy == "", "-", locy),
      colours = paste(color_left, color_right, sep = "/"),
      tags = paste(taglft, tagrt, sep = "/"),
      loc = paste(locx, locy, sep = "/"),
      reflo = as_reflo(locx, locy)) %>%
    rename(left = color_left, right = color_right)
  # Flag colour combinations shared by more than one squirrel.
  results <- results %>%
    group_by(left, right) %>%
    mutate(duplicate = n() > 1) %>%
    ungroup()
  # Flag invalid colours (more than two per ear) and non-standard combinations
  # (per the regexes: plain colours for adult females, "!"-suffixed for adult
  # males, "*"-suffixed for juveniles).
  results <- results %>%
    mutate(valid = grepl("^(([BRGYOWP]|Bk|Gy)[!*]?){1,2}$", left) | left == "-",
           valid = valid & (grepl("^(([BRGYOWP]|Bk|Gy)[!*]?){1,2}$", right) | right == "-")) %>%
    mutate(stdf = grepl("^([BRGYOWP]|Bk|Gy)$", left) | left == "-",
           stdf = stdf & (grepl("^([BRGYOWP]|Bk|Gy)$", right) | right == "-"),
           stdm = grepl("^([BRGYOWP]|Bk|Gy)[!]$", left) | left == "-",
           stdm = stdm & (grepl("^([BRGYOWP]|Bk|Gy)[!]$", right) | right == "-"),
           stdj = grepl("^([BRGYOWP]|Bk|Gy)[*]$", left) | left == "-",
           stdj = stdj & (grepl("^([BRGYOWP]|Bk|Gy)[*]$", right) | right == "-"),
           standard = (sex == "M" & !juvenile & stdm),
           standard = standard | (sex == "F" & !juvenile & stdf),
           standard = standard | (juvenile & stdj),
           standard = coalesce(standard, FALSE)) %>%
    select(left, right, squirrel_id, sex, juvenile,
           tags, colours, loc, reflo,
           last_trapped = date, standard, duplicate, valid) %>%
    arrange(juvenile, sex, left, right)
  as.tbl(results)
}
| /R/krsp-colours.r | no_license | KluaneRedSquirrelProject/krsp | R | false | false | 5,752 | r | #' Colour key
#'
#' Display a colour key based on the most recent trapping records for a given
#' grid and year.
#'
#' @param con Connection to KRSP database
#' @param grid character; grid to display colour key for.
#' @param year integer; year to display colour key for. Defaults to current year.
#'
#' @return A data frame allowing identification of squirrels by colours. In this
#' data frame there are three flags: \code{valid} indicates whether the
#' colours are valid (allowing for at most 2 colours per ear),
#' \code{duplicate} indicates whether that combination of colours is used on
#' multiple squirrels, and \code{standard} indicates whether a squirrel has
#' standard colouring, i.e. one colour in each ear and no mixing of wires,
#' pipes, and discs.
#'
#' @export
#' @examples
#' \dontrun{
#' con <- krsp_connect()
#' krsp_colours(con, grid = "KL", year = 2014)
#' }
# S3 generic; dispatch on the class of the database connection.
krsp_colours <- function(con, grid, year) {
  UseMethod("krsp_colours")
}
#' @export
krsp_colours.krsp <- function(con, grid, year = current_year()) {
  # Validate arguments before hitting the database.
  assert_that(inherits(con, "src_dbi"),
              valid_grid(grid, single = TRUE),
              valid_year(year, single = TRUE))
  year_arg <- as.integer(year)
  # Most recent trapping record per squirrel on this grid in the given year;
  # the litter LEFT JOIN flags squirrels belonging to a litter of that year.
  recent_query <- sprintf(
    "SELECT
t.id, t.squirrel_id, t.date, t.ft, s.sex,
t.taglft, t.tagrt,
t.color_left, t.color_right,
t.locx, t.locy,
(l.id IS NOT NULL) AS juvenile
FROM
trapping t
INNER JOIN squirrel s
ON t.squirrel_id = s.id
LEFT JOIN juvenile j
ON s.id = j.squirrel_id
LEFT JOIN litter l
ON j.litter_id = l.id
AND YEAR(COALESCE(l.fieldBDate, l.date1, l.tagDt)) = %i
WHERE
s.gr = '%s'
AND (t.squirrel_id, t.date) IN (
SELECT squirrel_id, MAX(date) as max_date
FROM trapping
WHERE YEAR(date) = %i
GROUP BY squirrel_id);", year_arg, grid, year_arg)
  recent <- krsp_sql(con, recent_query)
  # Tagged juveniles from this year's litters (they may never appear in
  # trapping records).
  juve_query <- sprintf(
    "SELECT
j.squirrel_id, l.tagDt AS date, j.sex,
j.taglft AS taglft, j.tagrt,
j.color_left, j.color_right,
l.nx2 AS locx, l.ny2 AS locy,
TRUE AS juvenile
FROM
litter l
INNER JOIN juvenile j
ON l.id = j.litter_id
WHERE
l.grid = '%s'
AND l.yr = %i
AND l.tagDt IS NOT NULL
AND (j.color_left IS NOT NULL OR j.color_right IS NOT NULL);",
    grid, year_arg)
  juveniles <- krsp_sql(con, juve_query)
  # Keep one (most recent, live) trapping row per squirrel.
  # NOTE: the deprecated dplyr underscore (SE) verbs are replaced throughout
  # with their standard NSE equivalents; behavior is unchanged.
  recent <- recent %>%
    arrange(desc(id)) %>%
    # remove dead squirrels
    filter(ft %in% 1:3) %>%
    group_by(squirrel_id) %>%
    filter(row_number() == 1) %>%
    ungroup() %>%
    select(-id, -ft)
  # Combine litter and trapping data, keeping whichever record is newer.
  trap_lit <- bind_rows(juveniles, recent) %>%
    group_by(squirrel_id) %>%
    arrange(desc(date)) %>%
    filter(row_number() == 1) %>%
    ungroup()
  # FIX: short-circuit || so nrow() is only consulted on an actual data frame
  # (with |, a non-data-frame would yield a zero-length condition and error).
  if (!is.data.frame(trap_lit) || nrow(trap_lit) == 0) {
    return(as.tbl(data.frame(
    )))
  }
  # Normalize colours/tags/locations; "-" marks a missing value.
  results <- trap_lit %>%
    mutate(
      juvenile = (juvenile == 1),
      color_left = toupper(color_left),
      # restore the two-letter codes mangled by toupper()
      color_left = gsub("BK", "Bk", color_left),
      color_left = gsub("GY", "Gy", color_left),
      color_left = ifelse(is.na(color_left) | color_left == "",
                          "-", color_left),
      color_right = toupper(color_right),
      color_right = gsub("BK", "Bk", color_right),
      color_right = gsub("GY", "Gy", color_right),
      color_right = ifelse(is.na(color_right) | color_right == "",
                           "-", color_right),
      taglft = ifelse(is.na(taglft) | taglft == "", "-", taglft),
      tagrt = ifelse(is.na(tagrt) | tagrt == "", "-", tagrt),
      locx = ifelse(is.na(locx) | locx == "", "-", locx),
      locy = ifelse(is.na(locy) | locy == "", "-", locy),
      colours = paste(color_left, color_right, sep = "/"),
      tags = paste(taglft, tagrt, sep = "/"),
      loc = paste(locx, locy, sep = "/"),
      reflo = as_reflo(locx, locy)) %>%
    rename(left = color_left, right = color_right)
  # Flag colour combinations shared by more than one squirrel.
  results <- results %>%
    group_by(left, right) %>%
    mutate(duplicate = n() > 1) %>%
    ungroup()
  # Flag invalid colours (more than two per ear) and non-standard combinations
  # (per the regexes: plain colours for adult females, "!"-suffixed for adult
  # males, "*"-suffixed for juveniles).
  results <- results %>%
    mutate(valid = grepl("^(([BRGYOWP]|Bk|Gy)[!*]?){1,2}$", left) | left == "-",
           valid = valid & (grepl("^(([BRGYOWP]|Bk|Gy)[!*]?){1,2}$", right) | right == "-")) %>%
    mutate(stdf = grepl("^([BRGYOWP]|Bk|Gy)$", left) | left == "-",
           stdf = stdf & (grepl("^([BRGYOWP]|Bk|Gy)$", right) | right == "-"),
           stdm = grepl("^([BRGYOWP]|Bk|Gy)[!]$", left) | left == "-",
           stdm = stdm & (grepl("^([BRGYOWP]|Bk|Gy)[!]$", right) | right == "-"),
           stdj = grepl("^([BRGYOWP]|Bk|Gy)[*]$", left) | left == "-",
           stdj = stdj & (grepl("^([BRGYOWP]|Bk|Gy)[*]$", right) | right == "-"),
           standard = (sex == "M" & !juvenile & stdm),
           standard = standard | (sex == "F" & !juvenile & stdf),
           standard = standard | (juvenile & stdj),
           standard = coalesce(standard, FALSE)) %>%
    select(left, right, squirrel_id, sex, juvenile,
           tags, colours, loc, reflo,
           last_trapped = date, standard, duplicate, valid) %>%
    arrange(juvenile, sex, left, right)
  as.tbl(results)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitequation.R
\name{fitequation}
\alias{fitequation}
\title{fit equation}
\usage{
fitequation(input)
}
\arguments{
\item{input}{(-) input data as a data frame containing the variables used to fit the linear model}
}
\value{
the equation of the fit as character
}
\description{
This function retrieves the equation of a linear model fit and returns it as a character string.
}
\author{
Yoav BD
}
| /man/fitequation.Rd | no_license | yoavbendor1/LakeN | R | false | true | 380 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitequation.R
\name{fitequation}
\alias{fitequation}
\title{fit equation}
\usage{
fitequation(input)
}
\arguments{
\item{input}{(-) input data as a data frame containing the variables used to fit the linear model}
}
\value{
the equation of the fit as character
}
\description{
This function retrieves the equation of a linear model fit and returns it as a character string.
}
\author{
Yoav BD
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.