code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import datetime
import logging
import saltext.azurerm.utils.azurerm
# Azure libs
HAS_LIBS = False
try:
from azure.keyvault.keys import KeyClient
from azure.core.exceptions import (
ResourceNotFoundError,
HttpResponseError,
ResourceExistsError,
SerializationError,
)
HAS_LIBS = True
except ImportError:
pass
__func_alias__ = {"list_": "list"}
log = logging.getLogger(__name__)
def get_key_client(vault_url, **kwargs):
    """
    .. versionadded:: 2.1.0

    Build and return a ``KeyClient`` bound to the given vault.

    :param vault_url: The URL of the vault that the client will access.
    """
    creds = saltext.azurerm.utils.azurerm.get_identity_credentials(**kwargs)
    return KeyClient(vault_url=vault_url, credential=creds)
def _key_as_dict(key):
    """
    Helper returning a dictionary representation of a Key object.
    The nested ``properties`` object is converted via _key_properties_as_dict.
    """
    converted = {}
    for attribute in ("id", "key_operations", "key_type", "name", "properties"):
        value = getattr(key, attribute)
        if attribute == "properties":
            value = _key_properties_as_dict(value)
        converted[attribute] = value
    return converted
def _key_properties_as_dict(key_properties):
    """
    Helper returning Key properties as a dictionary.
    datetime values are serialized to ISO-8601 strings.
    """
    prop_names = (
        "created_on",
        "enabled",
        "expires_on",
        "id",
        "managed",
        "name",
        "not_before",
        "recovery_level",
        "tags",
        "updated_on",
        "vault_url",
        "version",
    )
    converted = {}
    for prop_name in prop_names:
        value = getattr(key_properties, prop_name)
        if isinstance(value, datetime.datetime):
            value = value.isoformat()
        converted[prop_name] = value
    return converted
def backup_key(name, vault_url, **kwargs):
    """
    .. versionadded:: 2.1.0

    Back up a key in a protected form usable only by Azure Key Vault. Requires key/backup permission.
    Intended for copying a key from one vault to another; both vaults must belong to the same Azure
    subscription, and backup/restore cannot cross geopolitical boundaries (e.g. a backup taken from a
    vault in a USA region cannot be restored to a vault in an EU region).

    :param name: The name of the key to back up.

    :param vault_url: The URL of the vault that the client will access.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_keyvault_key.backup_key test_name test_vault
    """
    client = get_key_client(vault_url, **kwargs)
    try:
        return client.backup_key(name=name)
    except (ResourceNotFoundError, HttpResponseError) as exc:
        return {"error": str(exc)}
def begin_delete_key(name, vault_url, **kwargs):
    """
    .. versionadded:: 2.1.0

    Delete all versions of a key along with its cryptographic material. Requires keys/delete
    permission. When the vault has soft-delete enabled, deletion may take several seconds to finish.

    :param name: The name of the key to delete.

    :param vault_url: The URL of the vault that the client will access.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_keyvault_key.begin_delete_key test_name test_vault
    """
    client = get_key_client(vault_url, **kwargs)
    try:
        poller = client.begin_delete_key(name=name)
        poller.wait()
        return _key_as_dict(poller.result())
    except (ResourceNotFoundError, HttpResponseError) as exc:
        return {"error": str(exc)}
def begin_recover_deleted_key(name, vault_url, **kwargs):
    """
    .. versionadded:: 2.1.0

    Recover a deleted key to its latest version. Only possible in a vault with soft-delete enabled;
    requires keys/recover permission. Without soft-delete, begin_delete_key is permanent and this call
    raises an error, as does attempting to recover a key that is not deleted.

    :param name: The name of the deleted key to recover.

    :param vault_url: The URL of the vault that the client will access.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_keyvault_key.begin_recover_deleted_key test_name test_vault
    """
    client = get_key_client(vault_url, **kwargs)
    try:
        poller = client.begin_recover_deleted_key(name=name)
        poller.wait()
        return _key_as_dict(poller.result())
    except HttpResponseError as exc:
        return {"error": str(exc)}
def create_ec_key(
    name,
    vault_url,
    curve=None,
    key_operations=None,
    hardware_protected=None,
    enabled=None,
    expires_on=None,
    not_before=None,
    tags=None,
    **kwargs,
):
    """
    .. versionadded:: 2.1.0

    Create a new elliptic curve key, or a new version of it when the name is already in use.
    Requires the keys/create permission. Key properties can be given as keyword arguments.

    :param name: The name of the new key. Key names can only contain alphanumeric characters and dashes.

    :param vault_url: The URL of the vault that the client will access.

    :param curve: Elliptic curve name. Defaults to the NIST P-256 elliptic curve. Possible values
        include: "P-256", "P-256K", "P-384", "P-521".

    :param key_operations: A list of permitted key operations. Possible values include: 'decrypt',
        'encrypt', 'sign', 'unwrap_key', 'verify', 'wrap_key'.

    :param hardware_protected: A boolean value specifying whether the key should be created in a
        hardware security module. Defaults to False.

    :param enabled: A boolean value specifying whether the key is enabled for use.

    :param expires_on: When the key will expire, in UTC. This parameter should be a string
        representation of a Datetime object in ISO-8601 format.

    :param not_before: The time before which the key can not be used, in UTC. This parameter should
        be a string representation of a Datetime object in ISO-8601 format.

    :param tags: Application specific metadata in the form of key-value pairs.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_keyvault_key.create_ec_key test_name test_vault
    """
    client = get_key_client(vault_url, **kwargs)
    try:
        new_key = client.create_ec_key(
            name=name,
            curve=curve,
            key_operations=key_operations,
            hardware_protected=hardware_protected,
            enabled=enabled,
            expires_on=expires_on,
            not_before=not_before,
            tags=tags,
        )
    except HttpResponseError as exc:
        return {"error": str(exc)}
    return _key_as_dict(new_key)
def create_key(
    name,
    key_type,
    vault_url,
    key_operations=None,
    size=None,
    curve=None,
    enabled=None,
    expires_on=None,
    not_before=None,
    tags=None,
    **kwargs,
):
    """
    .. versionadded:: 2.1.0

    Create a key, or a new version of it when the name is already in use. Requires keys/create
    permission. Key properties can be given as keyword arguments.

    :param name: The name of the new key. Key names can only contain alphanumeric characters and dashes.

    :param key_type: The type of key to create. Possible values include: 'ec', 'ec_hsm', 'oct',
        'rsa', 'rsa_hsm'.

    :param vault_url: The URL of the vault that the client will access.

    :param key_operations: A list of permitted key operations. Possible values include: 'decrypt',
        'encrypt', 'sign', 'unwrap_key', 'verify', 'wrap_key'.

    :param size: RSA key size in bits, for example 2048, 3072, or 4096. Applies to RSA keys only.

    :param curve: Elliptic curve name. Defaults to the NIST P-256 elliptic curve. Possible values
        include: "P-256", "P-256K", "P-384", "P-521".

    :param enabled: Whether the key is enabled for use.

    :param expires_on: When the key will expire, in UTC. This parameter should be a string
        representation of a Datetime object in ISO-8601 format.

    :param not_before: The time before which the key can not be used, in UTC. This parameter should
        be a string representation of a Datetime object in ISO-8601 format.

    :param tags: Application specific metadata in the form of key-value pairs.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_keyvault_key.create_key test_name test_type test_vault
    """
    client = get_key_client(vault_url, **kwargs)
    # SDK expects e.g. "RSA-HSM" rather than "rsa_hsm"; "oct" is passed through as-is.
    if key_type != "oct":
        key_type = key_type.upper().replace("_", "-")
    try:
        new_key = client.create_key(
            name=name,
            key_type=key_type,
            enabled=enabled,
            size=size,
            curve=curve,
            expires_on=expires_on,
            not_before=not_before,
            tags=tags,
            key_operations=key_operations,
        )
    except HttpResponseError as exc:
        return {"error": str(exc)}
    return _key_as_dict(new_key)
def create_rsa_key(
    name,
    vault_url,
    size=None,
    key_operations=None,
    hardware_protected=None,
    enabled=None,
    expires_on=None,
    not_before=None,
    tags=None,
    **kwargs,
):
    """
    .. versionadded:: 2.1.0

    Create a new RSA key, or a new version of it when the name is already in use. Requires the
    keys/create permission. Key properties can be given as keyword arguments.

    :param name: The name of the new key. Key names can only contain alphanumeric characters and dashes.

    :param vault_url: The URL of the vault that the client will access.

    :param size: Key size in bits, for example 2048, 3072, or 4096.

    :param key_operations: A list of permitted key operations. Possible values include: 'decrypt',
        'encrypt', 'sign', 'unwrap_key', 'verify', 'wrap_key'.

    :param hardware_protected: A boolean value specifying whether the key should be created in a
        hardware security module. Defaults to False.

    :param enabled: Whether the key is enabled for use.

    :param expires_on: When the key will expire, in UTC. This parameter should be a string
        representation of a Datetime object in ISO-8601 format.

    :param not_before: The time before which the key can not be used, in UTC. This parameter should
        be a string representation of a Datetime object in ISO-8601 format.

    :param tags: Application specific metadata in the form of key-value pairs.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_keyvault_key.create_rsa_key test_name test_vault
    """
    client = get_key_client(vault_url, **kwargs)
    try:
        new_key = client.create_rsa_key(
            name=name,
            key_operations=key_operations,
            size=size,
            hardware_protected=hardware_protected,
            enabled=enabled,
            expires_on=expires_on,
            not_before=not_before,
            tags=tags,
        )
    except HttpResponseError as exc:
        return {"error": str(exc)}
    return _key_as_dict(new_key)
def get_deleted_key(name, vault_url, **kwargs):
    """
    .. versionadded:: 2.1.0

    Retrieve a deleted key. Only possible in a vault with soft-delete enabled.
    Requires keys/get permission.

    :param name: The name of the key.

    :param vault_url: The URL of the vault that the client will access.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_keyvault_key.get_deleted_key test_name test_vault
    """
    client = get_key_client(vault_url, **kwargs)
    try:
        deleted = client.get_deleted_key(name=name)
        return _key_as_dict(deleted)
    except (ResourceNotFoundError, HttpResponseError) as exc:
        return {"error": str(exc)}
def get_key(name, vault_url, version=None, **kwargs):
    """
    .. versionadded:: 2.1.0

    Retrieve a key's attributes and, for asymmetric keys, its public material.
    Requires keys/get permission.

    :param name: The name of the key to get.

    :param vault_url: The URL of the vault that the client will access.

    :param version: Used to specify the version of the key to get. If not specified, gets the
        latest version of the key.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_keyvault_key.get_key test_name test_vault
    """
    client = get_key_client(vault_url, **kwargs)
    try:
        found = client.get_key(name=name, version=version)
        return _key_as_dict(found)
    except (ResourceNotFoundError, HttpResponseError) as exc:
        return {"error": str(exc)}
def import_key(
    name,
    vault_url,
    hardware_protected=None,
    enabled=None,
    not_before=None,
    expires_on=None,
    tags=None,
    **kwargs,
):
    """
    .. versionadded:: 2.1.0

    Import a key created externally. Requires keys/import permission. If name is already in use, the key will be
    imported as a new version. Parameters used to build a JSONWebKey object will be passed to this module. More
    information about some of those parameters can be found at the following link:
    https://tools.ietf.org/html/draft-ietf-jose-json-web-key-18.

    :param name: The name of the imported key.

    :param vault_url: The URL of the vault that the client will access.

    :param hardware_protected: A boolean value specifying whether the key should be created in a hardware
        security module. Defaults to False.

    :param enabled: A boolean value specifying whether the key is enabled for use.

    :param expires_on: When the key will expire, in UTC. This parameter should be a string representation
        of a Datetime object in ISO-8601 format.

    :param not_before: The time before which the key can not be used, in UTC. This parameter should be a
        string representation of a Datetime object in ISO-8601 format.

    :param tags: Application specific metadata in the form of key-value pairs.

    Additional parameters passed as keyword arguments are used to build a JSONWebKey object. Below some of
    those parameters are defined. More information about some of those parameters can be found at the
    following link: https://tools.ietf.org/html/draft-ietf-jose-json-web-key-18.

    :param kid: Key identifier.
    :param kty: Key type (also accepted as ``key_type``). Possible values include: 'ec', 'ec_hsm', 'oct',
        'rsa', 'rsa_hsm'.
    :param key_ops: A list of allow operations for the key. Possible elements of the list include: 'decrypt',
        'encrypt', 'sign', 'unwrap_key', 'verify', 'wrap_key'
    :param n: RSA modulus.
    :param e: RSA public exponent.
    :param d: RSA private exponent, or the D component of the EC private key.
    :param dp: RSA private key parameter.
    :param dq: RSA private key parameter.
    :param qi: RSA private key parameter.
    :param p: RSA secret prime.
    :param q: RSA secret prime, with p < q.
    :param k: Symmetric key.
    :param t: HSM Token, used with 'Bring Your Own Key'.
    :param crv: Elliptic curve name. Possible values include: 'p_256', 'p_256_k', 'p_384', and 'p_521'.
    :param x: X component of an EC public key.
    :param y: Y component of an EC public key.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_keyvault_key.import_key test_name test_vault test_webkey_params
    """
    result = {}
    kconn = get_key_client(vault_url, **kwargs)
    # Normalize the key type to the SDK form (e.g. "RSA-HSM" rather than "rsa_hsm").
    # Use .get() so a missing key type no longer raises KeyError, and accept the
    # documented JSONWebKey field name "kty" as well as the legacy "key_type".
    for kty_param in ("key_type", "kty"):
        kty_value = kwargs.get(kty_param)
        if kty_value and kty_value != "oct":
            kwargs[kty_param] = kty_value.upper().replace("_", "-")
    try:
        keymodel = saltext.azurerm.utils.azurerm.create_object_model(
            "keyvault-keys", "JsonWebKey", **kwargs
        )
    except TypeError as exc:
        result = {"error": "The object model could not be built. ({})".format(str(exc))}
        return result
    try:
        key = kconn.import_key(
            name=name,
            hardware_protected=hardware_protected,
            enabled=enabled,
            tags=tags,
            not_before=not_before,
            expires_on=expires_on,
            key=keymodel,
        )
        result = _key_as_dict(key)
    except HttpResponseError as exc:
        result = {"error": str(exc)}
    return result
def list_(vault_url, **kwargs):
    """
    .. versionadded:: 2.1.0

    List identifiers and properties of every key in the vault. Requires keys/list permission.

    :param vault_url: The URL of the vault that the client will access.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_keyvault_key.list test_vault
    """
    client = get_key_client(vault_url, **kwargs)
    try:
        ret = {
            props.name: _key_properties_as_dict(props)
            for props in client.list_properties_of_keys()
        }
    except ResourceNotFoundError as exc:
        ret = {"error": str(exc)}
    return ret
def list_properties_of_key_versions(name, vault_url, **kwargs):
    """
    .. versionadded:: 2.1.0

    List the identifiers and properties of each version of a key. Requires keys/list permission.

    :param name: The name of the key.

    :param vault_url: The URL of the vault that the client will access.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_keyvault_key.list_properties_of_key_versions test_name test_vault
    """
    client = get_key_client(vault_url, **kwargs)
    try:
        ret = {
            props.name: _key_properties_as_dict(props)
            for props in client.list_properties_of_key_versions(name=name)
        }
    except ResourceNotFoundError as exc:
        ret = {"error": str(exc)}
    return ret
def list_deleted_keys(vault_url, **kwargs):
    """
    .. versionadded:: 2.1.0

    List every deleted key, including the public part of each. Only possible in a vault with
    soft-delete enabled. Requires keys/list permission.

    :param vault_url: The URL of the vault that the client will access.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_keyvault_key.list_deleted_keys test_vault
    """
    client = get_key_client(vault_url, **kwargs)
    try:
        ret = {deleted.name: _key_as_dict(deleted) for deleted in client.list_deleted_keys()}
    except ResourceNotFoundError as exc:
        ret = {"error": str(exc)}
    return ret
def purge_deleted_key(name, vault_url, **kwargs):
    """
    .. versionadded:: 2.1.0

    Permanently delete a deleted key. Only possible in a vault with soft-delete enabled. This is an
    irreversible deletion with no possibility of recovery, and it is unavailable unless the
    recovery_level specifies 'Purgeable'. Only needed to purge a key before its scheduled_purge_date.
    Requires keys/purge permission.

    :param name: The name of the deleted key to purge.

    :param vault_url: The URL of the vault that the client will access.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_keyvault_key.purge_deleted_key test_name test_vault
    """
    client = get_key_client(vault_url, **kwargs)
    try:
        client.purge_deleted_key(name=name)
    except HttpResponseError as exc:
        # On failure an error dict is returned instead of a boolean.
        return {"error": str(exc)}
    return True
def restore_key_backup(backup, vault_url, **kwargs):
    """
    .. versionadded:: 2.1.0

    Restore a key backup to the vault. This imports every version of the key with its name,
    attributes, and access control policies. Restoring fails if the key's name is already in use,
    and the target vault must be owned by the same Microsoft Azure subscription as the source vault.
    Requires keys/restore permission.

    :param backup: A key backup as returned by the backup_key operation.

    :param vault_url: The URL of the vault that the client will access.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_keyvault_key.restore_key_backup test_backup test_vault
    """
    client = get_key_client(vault_url, **kwargs)
    try:
        restored = client.restore_key_backup(
            backup=backup,
        )
        return _key_as_dict(restored)
    except (ResourceExistsError, HttpResponseError, SerializationError) as exc:
        return {"error": str(exc)}
def update_key_properties(
    name,
    vault_url,
    version=None,
    key_operations=None,
    enabled=None,
    expires_on=None,
    not_before=None,
    tags=None,
    **kwargs,
):
    """
    .. versionadded:: 2.1.0

    Change a key's properties, not its cryptographic material. Requires keys/update permission.
    The properties to change are given as keyword arguments.

    :param name: The name of the key to update.

    :param vault_url: The URL of the vault that the client will access.

    :param version: Used to specify the version of the key to update. If no version is specified,
        the latest version of the key will be updated.

    :param key_operations: A list of permitted key operations. Possible values include: 'decrypt',
        'encrypt', 'sign', 'unwrap_key', 'verify', 'wrap_key'.

    :param enabled: Whether the key is enabled for use.

    :param expires_on: When the key will expire, in UTC. This parameter should be a string
        representation of a Datetime object in ISO-8601 format.

    :param not_before: The time before which the key can not be used, in UTC. This parameter
        should be a string representation of a Datetime object in ISO-8601 format.

    :param tags: Application specific metadata in the form of key-value pairs.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_keyvault_key.update_key_properties test_name test_vault test_version
    """
    client = get_key_client(vault_url, **kwargs)
    try:
        updated = client.update_key_properties(
            name=name,
            version=version,
            key_operations=key_operations,
            enabled=enabled,
            expires_on=expires_on,
            not_before=not_before,
            tags=tags,
        )
        return _key_as_dict(updated)
    except (ResourceNotFoundError, HttpResponseError) as exc:
        return {"error": str(exc)}
import logging
import saltext.azurerm.utils.azurerm
# Azure libs
HAS_LIBS = False
try:
import azure.mgmt.compute.models # pylint: disable=unused-import
from azure.core.exceptions import (
HttpResponseError,
)
HAS_LIBS = True
except ImportError:
pass
__func_alias__ = {"list_": "list"}
log = logging.getLogger(__name__)
def create_or_update(
    name,
    vm_name,
    resource_group,
    location,
    publisher,
    extension_type,
    version,
    settings,
    auto_upgrade_minor_version=None,
    **kwargs,
):
    """
    .. versionadded:: 2.1.0

    Create or update a virtual machine extension.

    :param name: The name of the virtual machine extension.

    :param vm_name: The name of the virtual machine where the extension should be created or updated.

    :param resource_group: The name of the resource group.

    :param location: Resource location.

    :param publisher: The publisher of the extension.

    :param extension_type: Specifies the type of the extension; an example is "CustomScriptExtension".

    :param version: Specifies the version of the script handler.

    :param settings: A dictionary representing the public settings for the extension. This dictionary
        will be utilized as JSON by the SDK operation.

    :param auto_upgrade_minor_version: A boolean value indicating whether the extension should use a
        newer minor version if one is available at deployment time. Once deployed, however, the
        extension will not upgrade minor versions unless redeployed, even with this property set to True.

    :param tags: A dictionary of strings can be passed as tag metadata to the virtual machine
        extension object.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute_virtual_machine_extension.create_or_update test_name test_vm test_group
                  test_loc test_publisher test_type test_version test_settings
    """
    compconn = saltext.azurerm.utils.azurerm.get_client("compute", **kwargs)
    try:
        paramsmodel = saltext.azurerm.utils.azurerm.create_object_model(
            "compute",
            "VirtualMachineExtension",
            location=location,
            settings=settings,
            publisher=publisher,
            type_properties_type=extension_type,
            type_handler_version=version,
            auto_upgrade_minor_version=auto_upgrade_minor_version,
            **kwargs,
        )
    except TypeError as exc:
        return {"error": "The object model could not be built. ({})".format(str(exc))}
    try:
        poller = compconn.virtual_machine_extensions.begin_create_or_update(
            vm_extension_name=name,
            vm_name=vm_name,
            resource_group_name=resource_group,
            extension_parameters=paramsmodel,
        )
        poller.wait()
        return poller.result().as_dict()
    except HttpResponseError as exc:
        saltext.azurerm.utils.azurerm.log_cloud_error("compute", str(exc), **kwargs)
        return {"error": str(exc)}
def delete(name, vm_name, resource_group, **kwargs):
    """
    .. versionadded:: 2.1.0

    Delete a virtual machine extension.

    :param name: The name of the virtual machine extension.

    :param vm_name: The name of the virtual machine where the extension should be deleted.

    :param resource_group: The name of the resource group.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute_virtual_machine_extension.delete test_name test_vm test_group
    """
    compconn = saltext.azurerm.utils.azurerm.get_client("compute", **kwargs)
    try:
        poller = compconn.virtual_machine_extensions.begin_delete(
            vm_extension_name=name, vm_name=vm_name, resource_group_name=resource_group
        )
        poller.wait()
    except HttpResponseError as exc:
        saltext.azurerm.utils.azurerm.log_cloud_error("compute", str(exc), **kwargs)
        # On failure an error dict is returned instead of a boolean.
        return {"error": str(exc)}
    return True
def get(name, vm_name, resource_group, **kwargs):
    """
    .. versionadded:: 2.1.0

    Retrieve a virtual machine extension.

    :param name: The name of the virtual machine extension.

    :param vm_name: The name of the virtual machine containing the extension.

    :param resource_group: The name of the resource group.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute_virtual_machine_extension.get test_name test_vm test_group
    """
    compconn = saltext.azurerm.utils.azurerm.get_client("compute", **kwargs)
    try:
        found = compconn.virtual_machine_extensions.get(
            vm_extension_name=name, vm_name=vm_name, resource_group_name=resource_group
        )
        return found.as_dict()
    except HttpResponseError as exc:
        saltext.azurerm.utils.azurerm.log_cloud_error("compute", str(exc), **kwargs)
        return {"error": str(exc)}
def list_(vm_name, resource_group, **kwargs):
    """
    .. versionadded:: 2.1.0

    Retrieve every extension of a virtual machine, keyed by extension name.

    :param vm_name: The name of the virtual machine containing the extension.

    :param resource_group: The name of the resource group.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute_virtual_machine_extension.list test_vm test_group
    """
    compconn = saltext.azurerm.utils.azurerm.get_client("compute", **kwargs)
    try:
        listing = compconn.virtual_machine_extensions.list(
            vm_name=vm_name, resource_group_name=resource_group
        )
        ret = {ext["name"]: ext for ext in listing.as_dict().get("value", {})}
    except HttpResponseError as exc:
        saltext.azurerm.utils.azurerm.log_cloud_error("compute", str(exc), **kwargs)
        ret = {"error": str(exc)}
    return ret
import logging
# Azure libs
HAS_LIBS = False
try:
import azure.mgmt.compute.models # pylint: disable=unused-import
HAS_LIBS = True
except ImportError:
pass
__virtualname__ = "azurerm_compute"
log = logging.getLogger(__name__)
def __virtual__():
    """
    Only load this module when the required Azure SDK libraries imported successfully.
    """
    if HAS_LIBS:
        return __virtualname__
    return (
        False,
        "The following dependencies are required to use the Azure Resource Manager modules: "
        "Microsoft Azure SDK for Python >= 2.0rc6, "
        "MS REST Azure (msrestazure) >= 0.4",
    )
def availability_set_create_or_update(
    name, resource_group, **kwargs
):  # pylint: disable=invalid-name
    """
    .. versionadded:: 2019.2.0

    **WARNING: This function has been moved to another file (azurerm_compute_availability_set.py)
    and will be deprecated in the future.**

    Create or update an availability set. Delegates to
    ``azurerm_compute_availability_set.create_or_update``.

    :param name: The availability set to create.

    :param resource_group: The resource group name assigned to the availability set.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute.availability_set_create_or_update testset testgroup
    """
    wrapped = __salt__["azurerm_compute_availability_set.create_or_update"]
    return wrapped(name=name, resource_group=resource_group, **kwargs)
def availability_set_delete(name, resource_group, **kwargs):
    """
    .. versionadded:: 2019.2.0

    **WARNING: This function has been moved to another file (azurerm_compute_availability_set.py)
    and will be deprecated in the future.**

    Delete an availability set. Delegates to ``azurerm_compute_availability_set.delete``.

    :param name: The availability set to delete.

    :param resource_group: The resource group name assigned to the availability set.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute.availability_set_delete testset testgroup
    """
    wrapped = __salt__["azurerm_compute_availability_set.delete"]
    return wrapped(name=name, resource_group=resource_group, **kwargs)
def availability_set_get(name, resource_group, **kwargs):
    """
    .. versionadded:: 2019.2.0

    **WARNING: This function has been moved to another file (azurerm_compute_availability_set.py)
    and will be deprecated in the future.**

    Get a dictionary representing an availability set's properties. Delegates to
    ``azurerm_compute_availability_set.get``.

    :param name: The availability set to get.

    :param resource_group: The resource group name assigned to the availability set.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute.availability_set_get testset testgroup
    """
    wrapped = __salt__["azurerm_compute_availability_set.get"]
    return wrapped(name=name, resource_group=resource_group, **kwargs)
def availability_sets_list(resource_group, **kwargs):
    """
    .. versionadded:: 2019.2.0

    **WARNING: This function has been moved to another file (azurerm_compute_availability_set.py)
    and will be deprecated in the future.**

    List all availability sets within a resource group. Delegates to
    ``azurerm_compute_availability_set.list``.

    :param resource_group: The resource group name to list availability sets within.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute.availability_sets_list testgroup
    """
    wrapped = __salt__["azurerm_compute_availability_set.list"]
    return wrapped(resource_group=resource_group, **kwargs)
def availability_sets_list_available_sizes(name, resource_group, **kwargs):
    """
    .. versionadded:: 2019.2.0

    **WARNING: This function has been moved to another file (azurerm_compute_availability_set.py)
    and will be deprecated in the future.**

    List all available virtual machine sizes that can be used to create a new virtual machine in an
    existing availability set. Delegates to
    ``azurerm_compute_availability_set.list_available_sizes``.

    :param name: The availability set name to list available virtual machine sizes within.

    :param resource_group: The resource group name to list available availability set sizes within.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute.availability_sets_list_available_sizes testset testgroup
    """
    wrapped = __salt__["azurerm_compute_availability_set.list_available_sizes"]
    return wrapped(name=name, resource_group=resource_group, **kwargs)
def virtual_machine_capture(
    name, destination_name, resource_group, prefix="capture-", overwrite=False, **kwargs
):
    """
    .. versionadded:: 2019.2.0

    **WARNING: This function has been moved to another file (azurerm_compute_virtual_machine.py)
    and will be deprecated in the future.**

    Capture a VM by copying its virtual hard disks and output a template that can be used to
    create similar VMs. Delegates to ``azurerm_compute_virtual_machine.capture``.

    :param name: The name of the virtual machine.

    :param destination_name: The destination container name.

    :param resource_group: The resource group name assigned to the virtual machine.

    :param prefix: (Default: 'capture-') The captured virtual hard disk's name prefix.

    :param overwrite: (Default: False) Overwrite the destination disk in case of conflict.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute.virtual_machine_capture testvm testcontainer testgroup
    """
    wrapped = __salt__["azurerm_compute_virtual_machine.capture"]
    return wrapped(
        name=name,
        destination_name=destination_name,
        resource_group=resource_group,
        prefix=prefix,
        overwrite=overwrite,
        **kwargs,
    )
def virtual_machine_get(name, resource_group, **kwargs):
    """
    .. versionadded:: 2019.2.0

    **WARNING: This function has been moved to another file (azurerm_compute_virtual_machine.py)
    and will be deprecated in the future.**

    Retrieve information about the model view or the instance view of a virtual machine.
    Delegates to ``azurerm_compute_virtual_machine.get``.

    :param name: The name of the virtual machine.

    :param resource_group: The resource group name assigned to the virtual machine.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute.virtual_machine_get testvm testgroup
    """
    wrapped = __salt__["azurerm_compute_virtual_machine.get"]
    return wrapped(name=name, resource_group=resource_group, **kwargs)
def virtual_machine_convert_to_managed_disks(name, resource_group, **kwargs):
    """
    .. versionadded:: 2019.2.0

    **WARNING: This function has been moved to another file (azurerm_compute_virtual_machine.py)
    and will be deprecated in the future.**

    Convert virtual machine disks from blob-based to managed disks. The virtual machine must be
    stop-deallocated before invoking this operation. Delegates to
    ``azurerm_compute_virtual_machine.convert_to_managed_disks``.

    :param name: The name of the virtual machine to convert.

    :param resource_group: The resource group name assigned to the virtual machine.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute.virtual_machine_convert_to_managed_disks testvm testgroup
    """
    wrapped = __salt__["azurerm_compute_virtual_machine.convert_to_managed_disks"]
    return wrapped(name=name, resource_group=resource_group, **kwargs)
def virtual_machine_deallocate(name, resource_group, **kwargs):
    """
    .. versionadded:: 2019.2.0

    **WARNING: This function has been moved to another file (azurerm_compute_virtual_machine.py)
    and will be deprecated in the future.**

    Power off a virtual machine and deallocate its compute resources. Delegates to
    ``azurerm_compute_virtual_machine.deallocate``.

    :param name: The name of the virtual machine to deallocate.

    :param resource_group: The resource group name assigned to the virtual machine.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute.virtual_machine_deallocate testvm testgroup
    """
    wrapped = __salt__["azurerm_compute_virtual_machine.deallocate"]
    return wrapped(name=name, resource_group=resource_group, **kwargs)
def virtual_machine_generalize(name, resource_group, **kwargs):
    """
    .. versionadded:: 2019.2.0

    **WARNING: This function has been moved to another file (azurerm_compute_virtual_machine.py)
    and will be deprecated in the future.**

    Set the state of a virtual machine to 'generalized'.

    :param name: The name of the virtual machine.

    :param resource_group: The resource group name assigned to the virtual machine.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute.virtual_machine_generalize testvm testgroup
    """
    # Delegate to the relocated implementation in azurerm_compute_virtual_machine.
    generalize = __salt__["azurerm_compute_virtual_machine.generalize"]
    return generalize(name=name, resource_group=resource_group, **kwargs)
def virtual_machines_list(resource_group, **kwargs):
    """
    .. versionadded:: 2019.2.0

    **WARNING: This function has been moved to another file (azurerm_compute_virtual_machine.py)
    and will be deprecated in the future.**

    List all virtual machines within a resource group.

    :param resource_group: The resource group name to list virtual machines within.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute.virtual_machines_list testgroup
    """
    # Delegate to the relocated implementation in azurerm_compute_virtual_machine.
    list_vms = __salt__["azurerm_compute_virtual_machine.list"]
    return list_vms(resource_group=resource_group, **kwargs)
def virtual_machines_list_all(**kwargs):
    """
    .. versionadded:: 2019.2.0

    **WARNING: This function has been moved to another file (azurerm_compute_virtual_machine.py)
    and will be deprecated in the future.**

    List all virtual machines within a subscription.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute.virtual_machines_list_all
    """
    # Delegate to the relocated implementation in azurerm_compute_virtual_machine.
    list_all_vms = __salt__["azurerm_compute_virtual_machine.list_all"]
    return list_all_vms(**kwargs)
def virtual_machines_list_available_sizes(
    name, resource_group, **kwargs
):  # pylint: disable=invalid-name
    """
    .. versionadded:: 2019.2.0

    **WARNING: This function has been moved to another file (azurerm_compute_virtual_machine.py)
    and will be deprecated in the future.**

    List all available virtual machine sizes to which the specified virtual
    machine can be resized.

    :param name: The name of the virtual machine.

    :param resource_group: The resource group name assigned to the virtual machine.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute.virtual_machines_list_available_sizes testvm testgroup
    """
    # Forward everything to the relocated execution module.
    call_kwargs = dict(kwargs, name=name, resource_group=resource_group)
    return __salt__["azurerm_compute_virtual_machine.list_available_sizes"](**call_kwargs)
def virtual_machine_power_off(name, resource_group, **kwargs):
    """
    .. versionadded:: 2019.2.0

    **WARNING: This function has been moved to another file (azurerm_compute_virtual_machine.py)
    and will be deprecated in the future.**

    Power off (stop) a virtual machine.

    :param name: The name of the virtual machine to stop.

    :param resource_group: The resource group name assigned to the virtual machine.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute.virtual_machine_power_off testvm testgroup
    """
    # Delegate to the relocated implementation in azurerm_compute_virtual_machine.
    power_off = __salt__["azurerm_compute_virtual_machine.power_off"]
    return power_off(name=name, resource_group=resource_group, **kwargs)
def virtual_machine_restart(name, resource_group, **kwargs):
    """
    .. versionadded:: 2019.2.0

    **WARNING: This function has been moved to another file (azurerm_compute_virtual_machine.py)
    and will be deprecated in the future.**

    Restart a virtual machine.

    :param name: The name of the virtual machine to restart.

    :param resource_group: The resource group name assigned to the virtual machine.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute.virtual_machine_restart testvm testgroup
    """
    # Forward everything to the relocated execution module.
    call_kwargs = dict(kwargs, name=name, resource_group=resource_group)
    return __salt__["azurerm_compute_virtual_machine.restart"](**call_kwargs)
def virtual_machine_start(name, resource_group, **kwargs):
    """
    .. versionadded:: 2019.2.0

    **WARNING: This function has been moved to another file (azurerm_compute_virtual_machine.py)
    and will be deprecated in the future.**

    Power on (start) a virtual machine.

    :param name: The name of the virtual machine to start.

    :param resource_group: The resource group name assigned to the virtual machine.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute.virtual_machine_start testvm testgroup
    """
    # Delegate to the relocated implementation in azurerm_compute_virtual_machine.
    start_vm = __salt__["azurerm_compute_virtual_machine.start"]
    return start_vm(name=name, resource_group=resource_group, **kwargs)
def virtual_machine_redeploy(name, resource_group, **kwargs):
    """
    .. versionadded:: 2019.2.0

    **WARNING: This function has been moved to another file (azurerm_compute_virtual_machine.py)
    and will be deprecated in the future.**

    Redeploy a virtual machine.

    :param name: The name of the virtual machine to redeploy.

    :param resource_group: The resource group name assigned to the virtual machine.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute.virtual_machine_redeploy testvm testgroup
    """
    # Forward everything to the relocated execution module.
    call_kwargs = dict(kwargs, name=name, resource_group=resource_group)
    return __salt__["azurerm_compute_virtual_machine.redeploy"](**call_kwargs)
import logging
import saltext.azurerm.utils.azurerm
# Azure libs
HAS_LIBS = False
try:
import azure.mgmt.compute.models # pylint: disable=unused-import
from azure.core.exceptions import (
HttpResponseError,
SerializationError,
)
from azure.mgmt.core.tools import is_valid_resource_id
HAS_LIBS = True
except ImportError:
pass
__func_alias__ = {"list_": "list"}
log = logging.getLogger(__name__)
def create_or_update(
    name,
    resource_group,
    source_vm=None,
    source_vm_group=None,
    os_disk=None,
    data_disks=None,
    zone_resilient=False,
    hyper_vgeneration=None,
    **kwargs,
):
    """
    .. versionadded:: 2.1.0

    Create or update an image.

    :param name: The image to create.

    :param resource_group: The resource group name assigned to the image.

    :param source_vm: The name of the virtual machine from which the image is created. This parameter or a valid
        os_disk is required.

    :param source_vm_group: The name of the resource group containing the source virtual machine.
        This defaults to the same resource group specified for the resultant image.

    :param os_disk: The resource ID of an operating system disk to use for the image.

    :param data_disks: The resource ID or list of resource IDs associated with data disks to add to
        the image.

    :param zone_resilient: Specifies whether an image is zone resilient or not. Zone resilient images
        can be created only in regions that provide Zone Redundant Storage (ZRS).

    :param hyper_vgeneration: Gets the HyperVGenerationType of the VirtualMachine created from the image. Possible
        values include: "V1" and "V2".

    Returns the resulting image as a dictionary on success, or a dictionary
    with an ``error`` key describing the failure.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute_image.create_or_update testimage testgroup
    """
    # Default the image location to the resource group's location when the
    # caller does not supply one explicitly.
    if "location" not in kwargs:
        rg_props = __salt__["azurerm_resource.resource_group_get"](resource_group, **kwargs)
        if "error" in rg_props:
            log.error("Unable to determine location from resource group specified.")
            return {"error": "Unable to determine location from resource group specified."}
        kwargs["location"] = rg_props["location"]
    result = {}
    compconn = saltext.azurerm.utils.azurerm.get_client("compute", **kwargs)
    if source_vm:
        # Use VM name to link to the IDs of existing VMs.
        vm_instance = __salt__["azurerm_compute_virtual_machine.get"](
            name=source_vm,
            resource_group=(source_vm_group or resource_group),
            log_level="info",
            **kwargs,
        )
        if "error" in vm_instance:
            errmsg = "The source virtual machine could not be found."
            log.error(errmsg)
            result = {"error": errmsg}
            return result
        # The Image model expects a SubResource-style {"id": ...} reference,
        # not the bare VM name.
        source_vm = {"id": str(vm_instance["id"])}
    # The storage profile is only built when an explicit os_disk is supplied;
    # otherwise the profile is captured from the source virtual machine.
    spmodel = None
    if os_disk:
        if is_valid_resource_id(os_disk):
            os_disk = {"id": os_disk}
        else:
            errmsg = "The os_disk parameter is not a valid resource ID string."
            log.error(errmsg)
            result = {"error": errmsg}
            return result
        # Normalize data_disks into a list of {"id": ...} references.
        if data_disks:
            if isinstance(data_disks, list):
                data_disks = [{"id": dd} for dd in data_disks]
            elif isinstance(data_disks, str):
                data_disks = [{"id": data_disks}]
            else:
                errmsg = "The data_disk parameter is a single resource ID string or a list of resource IDs."
                log.error(errmsg)
                result = {"error": errmsg}
                return result
        try:
            # create_object_model consumes any matching keys from kwargs.
            spmodel = saltext.azurerm.utils.azurerm.create_object_model(
                "compute",
                "ImageStorageProfile",
                os_disk=os_disk,
                data_disks=data_disks,
                zone_resilient=zone_resilient,
                **kwargs,
            )
        except TypeError as exc:
            result = {"error": "The object model could not be built. ({})".format(str(exc))}
            return result
    try:
        imagemodel = saltext.azurerm.utils.azurerm.create_object_model(
            "compute",
            "Image",
            source_virtual_machine=source_vm,
            storage_profile=spmodel,
            hyper_vgeneration=hyper_vgeneration,
            **kwargs,
        )
    except TypeError as exc:
        result = {"error": "The object model could not be built. ({})".format(str(exc))}
        return result
    try:
        # begin_create_or_update returns a poller; block until the long-running
        # operation completes, then return the resulting image as a dict.
        image = compconn.images.begin_create_or_update(
            resource_group_name=resource_group, image_name=name, parameters=imagemodel
        )
        image.wait()
        result = image.result().as_dict()
    except HttpResponseError as exc:
        saltext.azurerm.utils.azurerm.log_cloud_error("compute", str(exc), **kwargs)
        result = {"error": str(exc)}
    except SerializationError as exc:
        result = {"error": "The object model could not be parsed. ({})".format(str(exc))}
    return result
def delete(name, resource_group, **kwargs):
    """
    .. versionadded:: 2.1.0

    Delete an image.

    :param name: The image to delete.

    :param resource_group: The resource group name assigned to the image.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute_image.delete testimage testgroup
    """
    compconn = saltext.azurerm.utils.azurerm.get_client("compute", **kwargs)
    try:
        # begin_delete returns a poller; block until the deletion completes.
        poller = compconn.images.begin_delete(resource_group_name=resource_group, image_name=name)
        poller.wait()
        return True
    except HttpResponseError as exc:
        saltext.azurerm.utils.azurerm.log_cloud_error("compute", str(exc), **kwargs)
        return {"error": str(exc)}
def get(name, resource_group, **kwargs):
    """
    .. versionadded:: 2.1.0

    Get properties of the specified image.

    :param name: The name of the image to query.

    :param resource_group: The resource group name assigned to the image.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute_image.get testimage testgroup
    """
    compconn = saltext.azurerm.utils.azurerm.get_client("compute", **kwargs)
    try:
        found = compconn.images.get(resource_group_name=resource_group, image_name=name)
        return found.as_dict()
    except HttpResponseError as exc:
        saltext.azurerm.utils.azurerm.log_cloud_error("compute", str(exc), **kwargs)
        return {"error": str(exc)}
def list_(resource_group=None, **kwargs):
    """
    .. versionadded:: 2.1.0

    Gets the list of Images in the subscription.

    :param resource_group: The name of the resource group to limit the results.

    CLI Example:

    .. code-block:: bash

        salt-call azurerm_compute_image.list
    """
    compconn = saltext.azurerm.utils.azurerm.get_client("compute", **kwargs)
    try:
        # Scope the listing to a resource group when one is given, otherwise
        # enumerate images across the whole subscription.
        if resource_group:
            pager = compconn.images.list_by_resource_group(resource_group_name=resource_group)
        else:
            pager = compconn.images.list()
        images = saltext.azurerm.utils.azurerm.paged_object_to_list(pager)
        return {image["name"]: image for image in images}
    except HttpResponseError as exc:
        saltext.azurerm.utils.azurerm.log_cloud_error("compute", str(exc), **kwargs)
        return {"error": str(exc)}
import logging
import salt.utils.dictdiffer # pylint: disable=import-error
import saltext.azurerm.utils.azurerm
__virtualname__ = "azurerm_network"
log = logging.getLogger(__name__)
def __virtual__():
    """
    Only make this state available if the azurerm_network module is available.
    """
    # Use a guard clause: bail out when the execution module failed to load.
    if "azurerm_network.check_ip_address_availability" not in __salt__:
        return (False, "azurerm_network module could not be loaded")
    return __virtualname__
def virtual_network_present(
    name,
    address_prefixes,
    resource_group,
    dns_servers=None,
    tags=None,
    connection_auth=None,
    **kwargs
):
    """
    .. versionadded:: 2019.2.0

    Ensure a virtual network exists.

    :param name:
        Name of the virtual network.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param address_prefixes:
        A list of CIDR blocks which can be used by subnets within the virtual network.

    :param dns_servers:
        A list of DNS server addresses.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the virtual network object.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure virtual network exists:
            azurerm_network.virtual_network_present:
                - name: vnet1
                - resource_group: group1
                - address_prefixes:
                    - '10.0.0.0/8'
                    - '192.168.0.0/16'
                - dns_servers:
                    - '8.8.8.8'
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurerm_resource: Ensure resource group exists
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    # Authentication information is mandatory for any Azure call.
    if not isinstance(connection_auth, dict):
        ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
        return ret
    vnet = __salt__["azurerm_network.virtual_network_get"](
        name, resource_group, azurerm_log_level="info", **connection_auth
    )
    # Existing vnet: diff the desired settings against the live object and
    # record each difference under ret["changes"].
    if "error" not in vnet:
        tag_changes = salt.utils.dictdiffer.deep_diff(vnet.get("tags", {}), tags or {})
        if tag_changes:
            ret["changes"]["tags"] = tag_changes
        # Order-insensitive comparison of the DNS server lists.
        dns_changes = set(dns_servers or []).symmetric_difference(
            set(vnet.get("dhcp_options", {}).get("dns_servers", []))
        )
        if dns_changes:
            ret["changes"]["dns_servers"] = {
                "old": vnet.get("dhcp_options", {}).get("dns_servers", []),
                "new": dns_servers,
            }
        # Order-insensitive comparison of the address prefixes.
        addr_changes = set(address_prefixes or []).symmetric_difference(
            set(vnet.get("address_space", {}).get("address_prefixes", []))
        )
        if addr_changes:
            ret["changes"]["address_space"] = {
                "address_prefixes": {
                    "old": vnet.get("address_space", {}).get("address_prefixes", []),
                    "new": address_prefixes,
                }
            }
        # Boolean protection flags default to False when not passed in kwargs.
        if kwargs.get("enable_ddos_protection", False) != vnet.get("enable_ddos_protection"):
            ret["changes"]["enable_ddos_protection"] = {
                "old": vnet.get("enable_ddos_protection"),
                "new": kwargs.get("enable_ddos_protection"),
            }
        if kwargs.get("enable_vm_protection", False) != vnet.get("enable_vm_protection"):
            ret["changes"]["enable_vm_protection"] = {
                "old": vnet.get("enable_vm_protection"),
                "new": kwargs.get("enable_vm_protection"),
            }
        # No differences found: the state is already satisfied.
        if not ret["changes"]:
            ret["result"] = True
            ret["comment"] = "Virtual network {} is already present.".format(name)
            return ret
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Virtual network {} would be updated.".format(name)
            return ret
    else:
        # The vnet does not exist yet; everything desired becomes "new".
        ret["changes"] = {
            "old": {},
            "new": {
                "name": name,
                "resource_group": resource_group,
                "address_space": {"address_prefixes": address_prefixes},
                "dhcp_options": {"dns_servers": dns_servers},
                "enable_ddos_protection": kwargs.get("enable_ddos_protection", False),
                "enable_vm_protection": kwargs.get("enable_vm_protection", False),
                "tags": tags,
            },
        }
    if __opts__["test"]:
        ret["comment"] = "Virtual network {} would be created.".format(name)
        ret["result"] = None
        return ret
    # Merge authentication parameters into the passthrough kwargs for the call.
    vnet_kwargs = kwargs.copy()
    vnet_kwargs.update(connection_auth)
    # create_or_update handles both the create and update paths.
    vnet = __salt__["azurerm_network.virtual_network_create_or_update"](
        name=name,
        resource_group=resource_group,
        address_prefixes=address_prefixes,
        dns_servers=dns_servers,
        tags=tags,
        **vnet_kwargs
    )
    if "error" not in vnet:
        ret["result"] = True
        ret["comment"] = "Virtual network {} has been created.".format(name)
        return ret
    ret["comment"] = "Failed to create virtual network {}! ({})".format(name, vnet.get("error"))
    return ret
def virtual_network_absent(name, resource_group, connection_auth=None):
    """
    .. versionadded:: 2019.2.0

    Ensure a virtual network does not exist in the resource group.

    :param name:
        Name of the virtual network.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    # Authentication information is mandatory for any Azure call.
    if not isinstance(connection_auth, dict):
        ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
        return ret

    existing = __salt__["azurerm_network.virtual_network_get"](
        name, resource_group, azurerm_log_level="info", **connection_auth
    )

    # Already absent -- nothing to change.
    if "error" in existing:
        ret["result"] = True
        ret["comment"] = "Virtual network {} was not found.".format(name)
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Virtual network {} would be deleted.".format(name)
        ret["changes"] = {"old": existing, "new": {}}
        return ret

    if __salt__["azurerm_network.virtual_network_delete"](name, resource_group, **connection_auth):
        ret["result"] = True
        ret["comment"] = "Virtual network {} has been deleted.".format(name)
        ret["changes"] = {"old": existing, "new": {}}
        return ret

    ret["comment"] = "Failed to delete virtual network {}!".format(name)
    return ret
def subnet_present(
    name,
    address_prefix,
    virtual_network,
    resource_group,
    security_group=None,
    route_table=None,
    connection_auth=None,
    **kwargs
):
    """
    .. versionadded:: 2019.2.0

    Ensure a subnet exists.

    :param name:
        Name of the subnet.

    :param address_prefix:
        A CIDR block used by the subnet within the virtual network.

    :param virtual_network:
        Name of the existing virtual network to contain the subnet.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param security_group:
        The name of the existing network security group to assign to the subnet.

    :param route_table:
        The name of the existing route table to assign to the subnet.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure subnet exists:
            azurerm_network.subnet_present:
                - name: vnet1_sn1
                - virtual_network: vnet1
                - resource_group: group1
                - address_prefix: '192.168.1.0/24'
                - security_group: nsg1
                - route_table: rt1
                - connection_auth: {{ profile }}
                - require:
                  - azurerm_network: Ensure virtual network exists
                  - azurerm_network: Ensure network security group exists
                  - azurerm_network: Ensure route table exists
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    # Authentication information is mandatory for any Azure call.
    if not isinstance(connection_auth, dict):
        ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
        return ret
    snet = __salt__["azurerm_network.subnet_get"](
        name, virtual_network, resource_group, azurerm_log_level="info", **connection_auth
    )
    # Existing subnet: diff the desired settings against the live object.
    if "error" not in snet:
        if address_prefix != snet.get("address_prefix"):
            ret["changes"]["address_prefix"] = {
                "old": snet.get("address_prefix"),
                "new": address_prefix,
            }
        # Associated NSG/route table are returned as full resource IDs; the
        # trailing path segment is the resource name used for comparison.
        nsg_name = None
        if snet.get("network_security_group"):
            nsg_name = snet["network_security_group"]["id"].split("/")[-1]
        # NOTE: a None security_group leaves any existing association unmanaged.
        if security_group and (security_group != nsg_name):
            ret["changes"]["network_security_group"] = {
                "old": nsg_name,
                "new": security_group,
            }
        rttbl_name = None
        if snet.get("route_table"):
            rttbl_name = snet["route_table"]["id"].split("/")[-1]
        if route_table and (route_table != rttbl_name):
            ret["changes"]["route_table"] = {"old": rttbl_name, "new": route_table}
        # No differences found: the state is already satisfied.
        if not ret["changes"]:
            ret["result"] = True
            ret["comment"] = "Subnet {} is already present.".format(name)
            return ret
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Subnet {} would be updated.".format(name)
            return ret
    else:
        # The subnet does not exist yet; everything desired becomes "new".
        ret["changes"] = {
            "old": {},
            "new": {
                "name": name,
                "address_prefix": address_prefix,
                "network_security_group": security_group,
                "route_table": route_table,
            },
        }
    if __opts__["test"]:
        ret["comment"] = "Subnet {} would be created.".format(name)
        ret["result"] = None
        return ret
    # Merge authentication parameters into the passthrough kwargs for the call.
    snet_kwargs = kwargs.copy()
    snet_kwargs.update(connection_auth)
    # create_or_update handles both the create and update paths.
    snet = __salt__["azurerm_network.subnet_create_or_update"](
        name=name,
        virtual_network=virtual_network,
        resource_group=resource_group,
        address_prefix=address_prefix,
        network_security_group=security_group,
        route_table=route_table,
        **snet_kwargs
    )
    if "error" not in snet:
        ret["result"] = True
        ret["comment"] = "Subnet {} has been created.".format(name)
        return ret
    ret["comment"] = "Failed to create subnet {}! ({})".format(name, snet.get("error"))
    return ret
def subnet_absent(name, virtual_network, resource_group, connection_auth=None):
    """
    .. versionadded:: 2019.2.0

    Ensure a subnet does not exist in the virtual network.

    :param name:
        Name of the subnet.

    :param virtual_network:
        Name of the existing virtual network containing the subnet.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    # Authentication information is mandatory for any Azure call.
    if not isinstance(connection_auth, dict):
        ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
        return ret

    existing = __salt__["azurerm_network.subnet_get"](
        name, virtual_network, resource_group, azurerm_log_level="info", **connection_auth
    )

    # Already absent -- nothing to change.
    if "error" in existing:
        ret["result"] = True
        ret["comment"] = "Subnet {} was not found.".format(name)
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Subnet {} would be deleted.".format(name)
        ret["changes"] = {"old": existing, "new": {}}
        return ret

    if __salt__["azurerm_network.subnet_delete"](
        name, virtual_network, resource_group, **connection_auth
    ):
        ret["result"] = True
        ret["comment"] = "Subnet {} has been deleted.".format(name)
        ret["changes"] = {"old": existing, "new": {}}
        return ret

    ret["comment"] = "Failed to delete subnet {}!".format(name)
    return ret
def network_security_group_present(
    name, resource_group, tags=None, security_rules=None, connection_auth=None, **kwargs
):
    """
    .. versionadded:: 2019.2.0

    Ensure a network security group exists.

    :param name:
        Name of the network security group.

    :param resource_group:
        The resource group assigned to the network security group.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the network security group object.

    :param security_rules: An optional list of dictionaries representing valid SecurityRule objects. See the
        documentation for the security_rule_present state or security_rule_create_or_update execution module
        for more information on required and optional parameters for security rules. The rules are only
        managed if this parameter is present. When this parameter is absent, implemented rules will not be removed,
        and will merely become unmanaged.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure network security group exists:
            azurerm_network.network_security_group_present:
                - name: nsg1
                - resource_group: group1
                - security_rules:
                  - name: nsg1_rule1
                    priority: 100
                    protocol: tcp
                    access: allow
                    direction: outbound
                    source_address_prefix: virtualnetwork
                    destination_address_prefix: internet
                    source_port_range: '*'
                    destination_port_range: '*'
                  - name: nsg1_rule2
                    priority: 101
                    protocol: tcp
                    access: allow
                    direction: inbound
                    source_address_prefix: internet
                    destination_address_prefix: virtualnetwork
                    source_port_range: '*'
                    destination_port_ranges:
                      - '80'
                      - '443'
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurerm_resource: Ensure resource group exists
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    # Authentication information is mandatory for any Azure call.
    if not isinstance(connection_auth, dict):
        ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
        return ret
    nsg = __salt__["azurerm_network.network_security_group_get"](
        name, resource_group, azurerm_log_level="info", **connection_auth
    )
    # Existing NSG: diff the desired settings against the live object.
    if "error" not in nsg:
        tag_changes = salt.utils.dictdiffer.deep_diff(nsg.get("tags", {}), tags or {})
        if tag_changes:
            ret["changes"]["tags"] = tag_changes
        # Rules are only compared (and therefore managed) when the caller
        # supplies a security_rules list.
        if security_rules:
            comp_ret = saltext.azurerm.utils.azurerm.compare_list_of_dicts(
                nsg.get("security_rules", []), security_rules
            )
            # A "comment" from the comparison helper signals invalid input.
            if comp_ret.get("comment"):
                ret["comment"] = '"security_rules" {}'.format(comp_ret["comment"])
                return ret
            if comp_ret.get("changes"):
                ret["changes"]["security_rules"] = comp_ret["changes"]
        # No differences found: the state is already satisfied.
        if not ret["changes"]:
            ret["result"] = True
            ret["comment"] = "Network security group {} is already present.".format(name)
            return ret
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Network security group {} would be updated.".format(name)
            return ret
    else:
        # The NSG does not exist yet; everything desired becomes "new".
        ret["changes"] = {
            "old": {},
            "new": {
                "name": name,
                "resource_group": resource_group,
                "tags": tags,
                "security_rules": security_rules,
            },
        }
    if __opts__["test"]:
        ret["comment"] = "Network security group {} would be created.".format(name)
        ret["result"] = None
        return ret
    # Merge authentication parameters into the passthrough kwargs for the call.
    nsg_kwargs = kwargs.copy()
    nsg_kwargs.update(connection_auth)
    # create_or_update handles both the create and update paths.
    nsg = __salt__["azurerm_network.network_security_group_create_or_update"](
        name=name,
        resource_group=resource_group,
        tags=tags,
        security_rules=security_rules,
        **nsg_kwargs
    )
    if "error" not in nsg:
        ret["result"] = True
        ret["comment"] = "Network security group {} has been created.".format(name)
        return ret
    ret["comment"] = "Failed to create network security group {}! ({})".format(
        name, nsg.get("error")
    )
    return ret
def network_security_group_absent(name, resource_group, connection_auth=None):
    """
    .. versionadded:: 2019.2.0

    Ensure a network security group does not exist in the resource group.

    :param name:
        Name of the network security group.

    :param resource_group:
        The resource group assigned to the network security group.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    # Authentication information is mandatory for any Azure call.
    if not isinstance(connection_auth, dict):
        ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
        return ret

    existing = __salt__["azurerm_network.network_security_group_get"](
        name, resource_group, azurerm_log_level="info", **connection_auth
    )

    # Already absent -- nothing to change.
    if "error" in existing:
        ret["result"] = True
        ret["comment"] = "Network security group {} was not found.".format(name)
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Network security group {} would be deleted.".format(name)
        ret["changes"] = {"old": existing, "new": {}}
        return ret

    if __salt__["azurerm_network.network_security_group_delete"](
        name, resource_group, **connection_auth
    ):
        ret["result"] = True
        ret["comment"] = "Network security group {} has been deleted.".format(name)
        ret["changes"] = {"old": existing, "new": {}}
        return ret

    ret["comment"] = "Failed to delete network security group {}!".format(name)
    return ret
def security_rule_present(
name,
access,
direction,
priority,
protocol,
security_group,
resource_group,
destination_address_prefix=None,
destination_port_range=None,
source_address_prefix=None,
source_port_range=None,
description=None,
destination_address_prefixes=None,
destination_port_ranges=None,
source_address_prefixes=None,
source_port_ranges=None,
connection_auth=None,
**kwargs
):
"""
.. versionadded:: 2019.2.0
Ensure a security rule exists.
:param name:
Name of the security rule.
:param access:
'allow' or 'deny'
:param direction:
'inbound' or 'outbound'
:param priority:
Integer between 100 and 4096 used for ordering rule application.
:param protocol:
'tcp', 'udp', or '*'
:param security_group:
The name of the existing network security group to contain the security rule.
:param resource_group:
The resource group assigned to the network security group.
:param description:
Optional description of the security rule.
:param destination_address_prefix:
The CIDR or destination IP range. Asterix '*' can also be used to match all destination IPs.
Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
If this is an ingress rule, specifies where network traffic originates from.
:param destination_port_range:
The destination port or range. Integer or range between 0 and 65535. Asterix '*'
can also be used to match all ports.
:param source_address_prefix:
The CIDR or source IP range. Asterix '*' can also be used to match all source IPs.
Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
If this is an ingress rule, specifies where network traffic originates from.
:param source_port_range:
The source port or range. Integer or range between 0 and 65535. Asterix '*'
can also be used to match all ports.
:param destination_address_prefixes:
A list of destination_address_prefix values. This parameter overrides destination_address_prefix
and will cause any value entered there to be ignored.
:param destination_port_ranges:
A list of destination_port_range values. This parameter overrides destination_port_range
and will cause any value entered there to be ignored.
:param source_address_prefixes:
A list of source_address_prefix values. This parameter overrides source_address_prefix
and will cause any value entered there to be ignored.
:param source_port_ranges:
A list of source_port_range values. This parameter overrides source_port_range
and will cause any value entered there to be ignored.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure security rule exists:
azurerm_network.security_rule_present:
- name: nsg1_rule2
- security_group: nsg1
- resource_group: group1
- priority: 101
- protocol: tcp
- access: allow
- direction: inbound
- source_address_prefix: internet
- destination_address_prefix: virtualnetwork
- source_port_range: '*'
- destination_port_ranges:
- '80'
- '443'
- connection_auth: {{ profile }}
- require:
- azurerm_network: Ensure network security group exists
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
if not isinstance(connection_auth, dict):
ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
return ret
exclusive_params = [
("source_port_ranges", "source_port_range"),
("source_address_prefixes", "source_address_prefix"),
("destination_port_ranges", "destination_port_range"),
("destination_address_prefixes", "destination_address_prefix"),
]
for params in exclusive_params:
# pylint: disable=eval-used
if not eval(params[0]) and not eval(params[1]):
ret["comment"] = "Either the {} or {} parameter must be provided!".format(
params[0], params[1]
)
return ret
# pylint: disable=eval-used
if eval(params[0]):
# pylint: disable=eval-used
if not isinstance(eval(params[0]), list):
ret["comment"] = "The {} parameter must be a list!".format(params[0])
return ret
# pylint: disable=exec-used
exec("{} = None".format(params[1]))
rule = __salt__["azurerm_network.security_rule_get"](
name, security_group, resource_group, azurerm_log_level="info", **connection_auth
)
if "error" not in rule:
# access changes
if access.capitalize() != rule.get("access"):
ret["changes"]["access"] = {"old": rule.get("access"), "new": access}
# description changes
if description != rule.get("description"):
ret["changes"]["description"] = {
"old": rule.get("description"),
"new": description,
}
# direction changes
if direction.capitalize() != rule.get("direction"):
ret["changes"]["direction"] = {
"old": rule.get("direction"),
"new": direction,
}
# priority changes
if int(priority) != rule.get("priority"):
ret["changes"]["priority"] = {"old": rule.get("priority"), "new": priority}
# protocol changes
if protocol.lower() != rule.get("protocol", "").lower():
ret["changes"]["protocol"] = {"old": rule.get("protocol"), "new": protocol}
# destination_port_range changes
if destination_port_range != rule.get("destination_port_range"):
ret["changes"]["destination_port_range"] = {
"old": rule.get("destination_port_range"),
"new": destination_port_range,
}
# source_port_range changes
if source_port_range != rule.get("source_port_range"):
ret["changes"]["source_port_range"] = {
"old": rule.get("source_port_range"),
"new": source_port_range,
}
# destination_port_ranges changes
if sorted(destination_port_ranges or []) != sorted(rule.get("destination_port_ranges", [])):
ret["changes"]["destination_port_ranges"] = {
"old": rule.get("destination_port_ranges"),
"new": destination_port_ranges,
}
# source_port_ranges changes
if sorted(source_port_ranges or []) != sorted(rule.get("source_port_ranges", [])):
ret["changes"]["source_port_ranges"] = {
"old": rule.get("source_port_ranges"),
"new": source_port_ranges,
}
# destination_address_prefix changes
if (destination_address_prefix or "").lower() != rule.get(
"destination_address_prefix", ""
).lower():
ret["changes"]["destination_address_prefix"] = {
"old": rule.get("destination_address_prefix"),
"new": destination_address_prefix,
}
# source_address_prefix changes
if (source_address_prefix or "").lower() != rule.get("source_address_prefix", "").lower():
ret["changes"]["source_address_prefix"] = {
"old": rule.get("source_address_prefix"),
"new": source_address_prefix,
}
# destination_address_prefixes changes
if sorted(destination_address_prefixes or []) != sorted(
rule.get("destination_address_prefixes", [])
):
if len(destination_address_prefixes or []) != len(
rule.get("destination_address_prefixes", [])
):
ret["changes"]["destination_address_prefixes"] = {
"old": rule.get("destination_address_prefixes"),
"new": destination_address_prefixes,
}
else:
local_dst_addrs, remote_dst_addrs = (
sorted(destination_address_prefixes),
sorted(rule.get("destination_address_prefixes")),
)
for idx, val in enumerate(local_dst_addrs):
if val.lower() != remote_dst_addrs[idx].lower():
ret["changes"]["destination_address_prefixes"] = {
"old": rule.get("destination_address_prefixes"),
"new": destination_address_prefixes,
}
break
# source_address_prefixes changes
if sorted(source_address_prefixes or []) != sorted(rule.get("source_address_prefixes", [])):
if len(source_address_prefixes or []) != len(rule.get("source_address_prefixes", [])):
ret["changes"]["source_address_prefixes"] = {
"old": rule.get("source_address_prefixes"),
"new": source_address_prefixes,
}
else:
local_src_addrs, remote_src_addrs = (
sorted(source_address_prefixes),
sorted(rule.get("source_address_prefixes")),
)
for idx, val in enumerate(local_src_addrs):
if val.lower() != remote_src_addrs[idx].lower():
ret["changes"]["source_address_prefixes"] = {
"old": rule.get("source_address_prefixes"),
"new": source_address_prefixes,
}
break
if not ret["changes"]:
ret["result"] = True
ret["comment"] = "Security rule {} is already present.".format(name)
return ret
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Security rule {} would be updated.".format(name)
return ret
else:
ret["changes"] = {
"old": {},
"new": {
"name": name,
"access": access,
"description": description,
"direction": direction,
"priority": priority,
"protocol": protocol,
"destination_address_prefix": destination_address_prefix,
"destination_address_prefixes": destination_address_prefixes,
"destination_port_range": destination_port_range,
"destination_port_ranges": destination_port_ranges,
"source_address_prefix": source_address_prefix,
"source_address_prefixes": source_address_prefixes,
"source_port_range": source_port_range,
"source_port_ranges": source_port_ranges,
},
}
if __opts__["test"]:
ret["comment"] = "Security rule {} would be created.".format(name)
ret["result"] = None
return ret
rule_kwargs = kwargs.copy()
rule_kwargs.update(connection_auth)
rule = __salt__["azurerm_network.security_rule_create_or_update"](
name=name,
access=access,
description=description,
direction=direction,
priority=priority,
protocol=protocol,
security_group=security_group,
resource_group=resource_group,
destination_address_prefix=destination_address_prefix,
destination_address_prefixes=destination_address_prefixes,
destination_port_range=destination_port_range,
destination_port_ranges=destination_port_ranges,
source_address_prefix=source_address_prefix,
source_address_prefixes=source_address_prefixes,
source_port_range=source_port_range,
source_port_ranges=source_port_ranges,
**rule_kwargs
)
if "error" not in rule:
ret["result"] = True
ret["comment"] = "Security rule {} has been created.".format(name)
return ret
ret["comment"] = "Failed to create security rule {}! ({})".format(name, rule.get("error"))
return ret
def security_rule_absent(name, security_group, resource_group, connection_auth=None):
    """
    .. versionadded:: 2019.2.0

    Ensure a security rule does not exist in the network security group.

    :param name:
        Name of the security rule.

    :param security_group:
        The network security group containing the security rule.

    :param resource_group:
        The resource group assigned to the network security group.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    # Connection details are mandatory for every call to the ARM API.
    if not isinstance(connection_auth, dict):
        ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
        return ret

    existing = __salt__["azurerm_network.security_rule_get"](
        name, security_group, resource_group, azurerm_log_level="info", **connection_auth
    )

    # A lookup error means the rule is already gone, which satisfies this state.
    if "error" in existing:
        ret["result"] = True
        ret["comment"] = f"Security rule {name} was not found."
        return ret

    if __opts__["test"]:
        ret["comment"] = f"Security rule {name} would be deleted."
        ret["result"] = None
        ret["changes"] = {"old": existing, "new": {}}
        return ret

    if __salt__["azurerm_network.security_rule_delete"](
        name, security_group, resource_group, **connection_auth
    ):
        ret["result"] = True
        ret["comment"] = f"Security rule {name} has been deleted."
        ret["changes"] = {"old": existing, "new": {}}
        return ret

    ret["comment"] = f"Failed to delete security rule {name}!"
    return ret
def load_balancer_present(
    name,
    resource_group,
    sku=None,
    frontend_ip_configurations=None,
    backend_address_pools=None,
    load_balancing_rules=None,
    probes=None,
    inbound_nat_rules=None,
    inbound_nat_pools=None,
    outbound_nat_rules=None,
    tags=None,
    connection_auth=None,
    **kwargs
):
    """
    .. versionadded:: 2019.2.0

    Ensure a load balancer exists.

    :param name:
        Name of the load balancer.

    :param resource_group:
        The resource group assigned to the load balancer.

    :param sku:
        The load balancer SKU, which can be 'Basic' or 'Standard'.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the load balancer object.

    :param frontend_ip_configurations:
        An optional list of dictionaries representing valid FrontendIPConfiguration objects. A frontend IP
        configuration can be either private (using private IP address and subnet parameters) or public (using a
        reference to a public IP address object). Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``private_ip_address``: The private IP address of the IP configuration. Required if
          'private_ip_allocation_method' is 'Static'.
        - ``private_ip_allocation_method``: The Private IP allocation method. Possible values are: 'Static' and
          'Dynamic'.
        - ``subnet``: Name of an existing subnet inside of which the frontend IP will reside.
        - ``public_ip_address``: Name of an existing public IP address which will be assigned to the frontend IP object.

    :param backend_address_pools:
        An optional list of dictionaries representing valid BackendAddressPool objects. Only the 'name' parameter is
        valid for a BackendAddressPool dictionary. All other parameters are read-only references from other objects
        linking to the backend address pool. Inbound traffic is randomly load balanced across IPs in the backend IPs.

    :param probes:
        An optional list of dictionaries representing valid Probe objects. Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``protocol``: The protocol of the endpoint. Possible values are 'Http' or 'Tcp'. If 'Tcp' is specified, a
          received ACK is required for the probe to be successful. If 'Http' is specified, a 200 OK response from the
          specified URI is required for the probe to be successful.
        - ``port``: The port for communicating the probe. Possible values range from 1 to 65535, inclusive.
        - ``interval_in_seconds``: The interval, in seconds, for how frequently to probe the endpoint for health status.
          Typically, the interval is slightly less than half the allocated timeout period (in seconds) which allows two
          full probes before taking the instance out of rotation. The default value is 15, the minimum value is 5.
        - ``number_of_probes``: The number of probes where if no response, will result in stopping further traffic from
          being delivered to the endpoint. This values allows endpoints to be taken out of rotation faster or slower
          than the typical times used in Azure.
        - ``request_path``: The URI used for requesting health status from the VM. Path is required if a protocol is
          set to 'Http'. Otherwise, it is not allowed. There is no default value.

    :param load_balancing_rules:
        An optional list of dictionaries representing valid LoadBalancingRule objects. Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``load_distribution``: The load distribution policy for this rule. Possible values are 'Default', 'SourceIP',
          and 'SourceIPProtocol'.
        - ``frontend_port``: The port for the external endpoint. Port numbers for each rule must be unique within the
          Load Balancer. Acceptable values are between 0 and 65534. Note that value 0 enables 'Any Port'.
        - ``backend_port``: The port used for internal connections on the endpoint. Acceptable values are between 0 and
          65535. Note that value 0 enables 'Any Port'.
        - ``idle_timeout_in_minutes``: The timeout for the TCP idle connection. The value can be set between 4 and 30
          minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
        - ``enable_floating_ip``: Configures a virtual machine's endpoint for the floating IP capability required
          to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn
          Availability Groups in SQL server. This setting can't be changed after you create the endpoint.
        - ``disable_outbound_snat``: Configures SNAT for the VMs in the backend pool to use the public IP address
          specified in the frontend of the load balancing rule.
        - ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the load balancing rule
          object.
        - ``backend_address_pool``: Name of the backend address pool object used by the load balancing rule object.
          Inbound traffic is randomly load balanced across IPs in the backend IPs.
        - ``probe``: Name of the probe object used by the load balancing rule object.

    :param inbound_nat_rules:
        An optional list of dictionaries representing valid InboundNatRule objects. Defining inbound NAT rules on your
        load balancer is mutually exclusive with defining an inbound NAT pool. Inbound NAT pools are referenced from
        virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an
        Inbound NAT pool. They have to reference individual inbound NAT rules. Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the inbound NAT rule
          object.
        - ``protocol``: Possible values include 'Udp', 'Tcp', or 'All'.
        - ``frontend_port``: The port for the external endpoint. Port numbers for each rule must be unique within the
          Load Balancer. Acceptable values range from 1 to 65534.
        - ``backend_port``: The port used for the internal endpoint. Acceptable values range from 1 to 65535.
        - ``idle_timeout_in_minutes``: The timeout for the TCP idle connection. The value can be set between 4 and 30
          minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
        - ``enable_floating_ip``: Configures a virtual machine's endpoint for the floating IP capability required
          to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn
          Availability Groups in SQL server. This setting can't be changed after you create the endpoint.

    :param inbound_nat_pools:
        An optional list of dictionaries representing valid InboundNatPool objects. They define an external port range
        for inbound NAT to a single backend port on NICs associated with a load balancer. Inbound NAT rules are created
        automatically for each NIC associated with the Load Balancer using an external port from this range. Defining an
        Inbound NAT pool on your Load Balancer is mutually exclusive with defining inbound NAT rules. Inbound NAT pools
        are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot
        reference an inbound NAT pool. They have to reference individual inbound NAT rules. Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the inbound NAT pool
          object.
        - ``protocol``: Possible values include 'Udp', 'Tcp', or 'All'.
        - ``frontend_port_range_start``: The first port number in the range of external ports that will be used to
          provide Inbound NAT to NICs associated with a load balancer. Acceptable values range between 1 and 65534.
        - ``frontend_port_range_end``: The last port number in the range of external ports that will be used to
          provide Inbound NAT to NICs associated with a load balancer. Acceptable values range between 1 and 65535.
        - ``backend_port``: The port used for internal connections to the endpoint. Acceptable values are between 1 and
          65535.

    :param outbound_nat_rules:
        An optional list of dictionaries representing valid OutboundNatRule objects. Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the outbound NAT rule
          object.
        - ``backend_address_pool``: Name of the backend address pool object used by the outbound NAT rule object.
          Outbound traffic is randomly load balanced across IPs in the backend IPs.
        - ``allocated_outbound_ports``: The number of outbound ports to be used for NAT.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure load balancer exists:
            azurerm_network.load_balancer_present:
                - name: lb1
                - resource_group: group1
                - location: eastus
                - frontend_ip_configurations:
                  - name: lb1_feip1
                    public_ip_address: pub_ip1
                - backend_address_pools:
                  - name: lb1_bepool1
                - probes:
                  - name: lb1_webprobe1
                    protocol: tcp
                    port: 80
                    interval_in_seconds: 5
                    number_of_probes: 2
                - load_balancing_rules:
                  - name: lb1_webprobe1
                    protocol: tcp
                    frontend_port: 80
                    backend_port: 80
                    idle_timeout_in_minutes: 4
                    frontend_ip_configuration: lb1_feip1
                    backend_address_pool: lb1_bepool1
                    probe: lb1_webprobe1
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurerm_resource: Ensure resource group exists
                  - azurerm_network: Ensure public IP exists
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    if not isinstance(connection_auth, dict):
        ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
        return ret

    # The SDK expects the SKU as a dictionary with a capitalized name.
    if sku:
        sku = {"name": sku.capitalize()}

    load_bal = __salt__["azurerm_network.load_balancer_get"](
        name, resource_group, azurerm_log_level="info", **connection_auth
    )

    if "error" not in load_bal:
        # tag changes
        tag_changes = salt.utils.dictdiffer.deep_diff(load_bal.get("tags", {}), tags or {})
        if tag_changes:
            ret["changes"]["tags"] = tag_changes

        # sku changes
        if sku:
            sku_changes = salt.utils.dictdiffer.deep_diff(load_bal.get("sku", {}), sku)
            if sku_changes:
                ret["changes"]["sku"] = sku_changes

        # The remaining parameters are all lists of dictionaries compared with the
        # same helper, so drive the comparisons from a table instead of repeating
        # an identical block once per parameter. The third element of each entry
        # lists the keys which reference other Azure objects by name (and are
        # therefore passed through to compare_list_of_dicts); None means the
        # helper is called without that argument, exactly as before.
        list_of_dict_params = [
            (
                "frontend_ip_configurations",
                frontend_ip_configurations,
                ["public_ip_address", "subnet"],
            ),
            ("backend_address_pools", backend_address_pools, None),
            ("probes", probes, None),
            (
                "load_balancing_rules",
                load_balancing_rules,
                ["frontend_ip_configuration", "backend_address_pool", "probe"],
            ),
            ("inbound_nat_rules", inbound_nat_rules, ["frontend_ip_configuration"]),
            ("inbound_nat_pools", inbound_nat_pools, ["frontend_ip_configuration"]),
            ("outbound_nat_rules", outbound_nat_rules, ["frontend_ip_configuration"]),
        ]

        for param_name, new_value, ref_keys in list_of_dict_params:
            if not new_value:
                continue
            if ref_keys is None:
                comp_ret = saltext.azurerm.utils.azurerm.compare_list_of_dicts(
                    load_bal.get(param_name, []), new_value
                )
            else:
                comp_ret = saltext.azurerm.utils.azurerm.compare_list_of_dicts(
                    load_bal.get(param_name, []), new_value, ref_keys
                )
            # A comment from the helper signals invalid input; surface it and bail.
            if comp_ret.get("comment"):
                ret["comment"] = '"{}" {}'.format(param_name, comp_ret["comment"])
                return ret
            if comp_ret.get("changes"):
                ret["changes"][param_name] = comp_ret["changes"]

        if not ret["changes"]:
            ret["result"] = True
            ret["comment"] = "Load balancer {} is already present.".format(name)
            return ret

        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Load balancer {} would be updated.".format(name)
            return ret

    else:
        # The load balancer does not exist yet; everything supplied is "new".
        ret["changes"] = {
            "old": {},
            "new": {
                "name": name,
                "sku": sku,
                "tags": tags,
                "frontend_ip_configurations": frontend_ip_configurations,
                "backend_address_pools": backend_address_pools,
                "load_balancing_rules": load_balancing_rules,
                "probes": probes,
                "inbound_nat_rules": inbound_nat_rules,
                "inbound_nat_pools": inbound_nat_pools,
                "outbound_nat_rules": outbound_nat_rules,
            },
        }

        if __opts__["test"]:
            ret["comment"] = "Load balancer {} would be created.".format(name)
            ret["result"] = None
            return ret

    lb_kwargs = kwargs.copy()
    lb_kwargs.update(connection_auth)

    load_bal = __salt__["azurerm_network.load_balancer_create_or_update"](
        name=name,
        resource_group=resource_group,
        sku=sku,
        tags=tags,
        frontend_ip_configurations=frontend_ip_configurations,
        backend_address_pools=backend_address_pools,
        load_balancing_rules=load_balancing_rules,
        probes=probes,
        inbound_nat_rules=inbound_nat_rules,
        inbound_nat_pools=inbound_nat_pools,
        outbound_nat_rules=outbound_nat_rules,
        **lb_kwargs
    )

    if "error" not in load_bal:
        ret["result"] = True
        ret["comment"] = "Load balancer {} has been created.".format(name)
        return ret

    ret["comment"] = "Failed to create load balancer {}! ({})".format(name, load_bal.get("error"))
    return ret
def load_balancer_absent(name, resource_group, connection_auth=None):
    """
    .. versionadded:: 2019.2.0

    Ensure a load balancer does not exist in the resource group.

    :param name:
        Name of the load balancer.

    :param resource_group:
        The resource group assigned to the load balancer.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    # Connection details are mandatory for every call to the ARM API.
    if not isinstance(connection_auth, dict):
        ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
        return ret

    existing = __salt__["azurerm_network.load_balancer_get"](
        name, resource_group, azurerm_log_level="info", **connection_auth
    )

    # A lookup error means the load balancer is already absent.
    if "error" in existing:
        ret["result"] = True
        ret["comment"] = f"Load balancer {name} was not found."
        return ret

    if __opts__["test"]:
        ret["comment"] = f"Load balancer {name} would be deleted."
        ret["result"] = None
        ret["changes"] = {"old": existing, "new": {}}
        return ret

    if __salt__["azurerm_network.load_balancer_delete"](name, resource_group, **connection_auth):
        ret["result"] = True
        ret["comment"] = f"Load balancer {name} has been deleted."
        ret["changes"] = {"old": existing, "new": {}}
        return ret

    ret["comment"] = f"Failed to delete load balancer {name}!"
    return ret
def public_ip_address_present(
    name,
    resource_group,
    tags=None,
    sku=None,
    public_ip_allocation_method=None,
    public_ip_address_version=None,
    dns_settings=None,
    idle_timeout_in_minutes=None,
    connection_auth=None,
    **kwargs
):
    """
    .. versionadded:: 2019.2.0

    Ensure a public IP address exists.

    :param name:
        Name of the public IP address.

    :param resource_group:
        The resource group assigned to the public IP address.

    :param dns_settings:
        An optional dictionary representing a valid PublicIPAddressDnsSettings object. Parameters include
        'domain_name_label' and 'reverse_fqdn', which accept strings. The 'domain_name_label' parameter is concatenated
        with the regionalized DNS zone make up the fully qualified domain name associated with the public IP address.
        If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS
        system. The 'reverse_fqdn' parameter is a user-visible, fully qualified domain name that resolves to this public
        IP address. If the reverse FQDN is specified, then a PTR DNS record is created pointing from the IP address in
        the in-addr.arpa domain to the reverse FQDN.

    :param sku:
        The public IP address SKU, which can be 'Basic' or 'Standard'.

    :param public_ip_allocation_method:
        The public IP allocation method. Possible values are: 'Static' and 'Dynamic'.

    :param public_ip_address_version:
        The public IP address version. Possible values are: 'IPv4' and 'IPv6'.

    :param idle_timeout_in_minutes:
        An integer representing the idle timeout of the public IP address.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the public IP address object.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure public IP exists:
            azurerm_network.public_ip_address_present:
                - name: pub_ip1
                - resource_group: group1
                - dns_settings:
                    domain_name_label: decisionlab-ext-test-label
                - sku: basic
                - public_ip_allocation_method: static
                - public_ip_address_version: ipv4
                - idle_timeout_in_minutes: 4
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurerm_resource: Ensure resource group exists
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    if not isinstance(connection_auth, dict):
        ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
        return ret

    # The SDK expects the SKU as a dictionary with a capitalized name.
    if sku:
        sku = {"name": sku.capitalize()}

    existing = __salt__["azurerm_network.public_ip_address_get"](
        name, resource_group, azurerm_log_level="info", **connection_auth
    )

    if "error" not in existing:
        # The resource exists: diff each supplied parameter against its current value.
        tag_diff = salt.utils.dictdiffer.deep_diff(existing.get("tags", {}), tags or {})
        if tag_diff:
            ret["changes"]["tags"] = tag_diff

        if dns_settings:
            if not isinstance(dns_settings, dict):
                ret["comment"] = "DNS settings must be provided as a dictionary!"
                return ret
            # Only keys supplied by the user are compared against the remote state.
            if any(
                val != existing.get("dns_settings", {}).get(key)
                for key, val in dns_settings.items()
            ):
                ret["changes"]["dns_settings"] = {
                    "old": existing.get("dns_settings"),
                    "new": dns_settings,
                }

        if sku:
            sku_diff = salt.utils.dictdiffer.deep_diff(existing.get("sku", {}), sku)
            if sku_diff:
                ret["changes"]["sku"] = sku_diff

        if public_ip_allocation_method and public_ip_allocation_method.capitalize() != existing.get(
            "public_ip_allocation_method"
        ):
            ret["changes"]["public_ip_allocation_method"] = {
                "old": existing.get("public_ip_allocation_method"),
                "new": public_ip_allocation_method,
            }

        if (
            public_ip_address_version
            and public_ip_address_version.lower()
            != existing.get("public_ip_address_version", "").lower()
        ):
            ret["changes"]["public_ip_address_version"] = {
                "old": existing.get("public_ip_address_version"),
                "new": public_ip_address_version,
            }

        if idle_timeout_in_minutes and int(idle_timeout_in_minutes) != existing.get(
            "idle_timeout_in_minutes"
        ):
            ret["changes"]["idle_timeout_in_minutes"] = {
                "old": existing.get("idle_timeout_in_minutes"),
                "new": idle_timeout_in_minutes,
            }

        if not ret["changes"]:
            ret["result"] = True
            ret["comment"] = f"Public IP address {name} is already present."
            return ret

        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = f"Public IP address {name} would be updated."
            return ret

    else:
        # The resource does not exist yet; everything supplied is "new".
        ret["changes"] = {
            "old": {},
            "new": {
                "name": name,
                "tags": tags,
                "dns_settings": dns_settings,
                "sku": sku,
                "public_ip_allocation_method": public_ip_allocation_method,
                "public_ip_address_version": public_ip_address_version,
                "idle_timeout_in_minutes": idle_timeout_in_minutes,
            },
        }

        if __opts__["test"]:
            ret["comment"] = f"Public IP address {name} would be created."
            ret["result"] = None
            return ret

    pub_ip_kwargs = kwargs.copy()
    pub_ip_kwargs.update(connection_auth)

    result = __salt__["azurerm_network.public_ip_address_create_or_update"](
        name=name,
        resource_group=resource_group,
        sku=sku,
        tags=tags,
        dns_settings=dns_settings,
        public_ip_allocation_method=public_ip_allocation_method,
        public_ip_address_version=public_ip_address_version,
        idle_timeout_in_minutes=idle_timeout_in_minutes,
        **pub_ip_kwargs
    )

    if "error" not in result:
        ret["result"] = True
        ret["comment"] = f"Public IP address {name} has been created."
        return ret

    ret["comment"] = "Failed to create public IP address {}! ({})".format(name, result.get("error"))
    return ret
def public_ip_address_absent(name, resource_group, connection_auth=None):
    """
    .. versionadded:: 2019.2.0

    Ensure a public IP address does not exist in the resource group.

    :param name:
        Name of the public IP address.

    :param resource_group:
        The resource group assigned to the public IP address.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    # Connection details are mandatory for every call to the ARM API.
    if not isinstance(connection_auth, dict):
        ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
        return ret

    existing = __salt__["azurerm_network.public_ip_address_get"](
        name, resource_group, azurerm_log_level="info", **connection_auth
    )

    # A lookup error means the address is already absent.
    if "error" in existing:
        ret["result"] = True
        ret["comment"] = f"Public IP address {name} was not found."
        return ret

    if __opts__["test"]:
        ret["comment"] = f"Public IP address {name} would be deleted."
        ret["result"] = None
        ret["changes"] = {"old": existing, "new": {}}
        return ret

    if __salt__["azurerm_network.public_ip_address_delete"](
        name, resource_group, **connection_auth
    ):
        ret["result"] = True
        ret["comment"] = f"Public IP address {name} has been deleted."
        ret["changes"] = {"old": existing, "new": {}}
        return ret

    ret["comment"] = f"Failed to delete public IP address {name}!"
    return ret
def network_interface_present(
name,
ip_configurations,
subnet,
virtual_network,
resource_group,
tags=None,
virtual_machine=None,
network_security_group=None,
dns_settings=None,
mac_address=None,
primary=None,
enable_accelerated_networking=None,
enable_ip_forwarding=None,
connection_auth=None,
**kwargs
):
"""
.. versionadded:: 2019.2.0
Ensure a network interface exists.
:param name:
Name of the network interface.
:param ip_configurations:
A list of dictionaries representing valid NetworkInterfaceIPConfiguration objects. The 'name' key is required at
minimum. At least one IP Configuration must be present.
:param subnet:
Name of the existing subnet assigned to the network interface.
:param virtual_network:
Name of the existing virtual network containing the subnet.
:param resource_group:
The resource group assigned to the virtual network.
:param tags:
A dictionary of strings can be passed as tag metadata to the network interface object.
:param network_security_group:
The name of the existing network security group to assign to the network interface.
:param virtual_machine:
The name of the existing virtual machine to assign to the network interface.
:param dns_settings:
An optional dictionary representing a valid NetworkInterfaceDnsSettings object. Valid parameters are:
- ``dns_servers``: List of DNS server IP addresses. Use 'AzureProvidedDNS' to switch to Azure provided DNS
resolution. 'AzureProvidedDNS' value cannot be combined with other IPs, it must be the only value in
dns_servers collection.
- ``internal_dns_name_label``: Relative DNS name for this NIC used for internal communications between VMs in
the same virtual network.
- ``internal_fqdn``: Fully qualified DNS name supporting internal communications between VMs in the same virtual
network.
- ``internal_domain_name_suffix``: Even if internal_dns_name_label is not specified, a DNS entry is created for
the primary NIC of the VM. This DNS name can be constructed by concatenating the VM name with the value of
internal_domain_name_suffix.
:param mac_address:
Optional string containing the MAC address of the network interface.
:param primary:
Optional boolean allowing the interface to be set as the primary network interface on a virtual machine
with multiple interfaces attached.
:param enable_accelerated_networking:
Optional boolean indicating whether accelerated networking should be enabled for the interface.
:param enable_ip_forwarding:
Optional boolean indicating whether IP forwarding should be enabled for the interface.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure network interface exists:
azurerm_network.network_interface_present:
- name: iface1
- subnet: vnet1_sn1
- virtual_network: vnet1
- resource_group: group1
- ip_configurations:
- name: iface1_ipc1
public_ip_address: pub_ip2
- dns_settings:
internal_dns_name_label: decisionlab-int-test-label
- primary: True
- enable_accelerated_networking: True
- enable_ip_forwarding: False
- network_security_group: nsg1
- connection_auth: {{ profile }}
- require:
- azurerm_network: Ensure subnet exists
- azurerm_network: Ensure network security group exists
- azurerm_network: Ensure another public IP exists
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
if not isinstance(connection_auth, dict):
ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
return ret
iface = __salt__["azurerm_network.network_interface_get"](
name, resource_group, azurerm_log_level="info", **connection_auth
)
if "error" not in iface:
# tag changes
tag_changes = salt.utils.dictdiffer.deep_diff(iface.get("tags", {}), tags or {})
if tag_changes:
ret["changes"]["tags"] = tag_changes
# mac_address changes
if mac_address and (mac_address != iface.get("mac_address")):
ret["changes"]["mac_address"] = {
"old": iface.get("mac_address"),
"new": mac_address,
}
# primary changes
if primary is not None:
if primary != iface.get("primary", True):
ret["changes"]["primary"] = {
"old": iface.get("primary"),
"new": primary,
}
# enable_accelerated_networking changes
if enable_accelerated_networking is not None:
if enable_accelerated_networking != iface.get("enable_accelerated_networking"):
ret["changes"]["enable_accelerated_networking"] = {
"old": iface.get("enable_accelerated_networking"),
"new": enable_accelerated_networking,
}
# enable_ip_forwarding changes
if enable_ip_forwarding is not None:
if enable_ip_forwarding != iface.get("enable_ip_forwarding"):
ret["changes"]["enable_ip_forwarding"] = {
"old": iface.get("enable_ip_forwarding"),
"new": enable_ip_forwarding,
}
# network_security_group changes
nsg_name = None
if iface.get("network_security_group"):
nsg_name = iface["network_security_group"]["id"].split("/")[-1]
if network_security_group and (network_security_group != nsg_name):
ret["changes"]["network_security_group"] = {
"old": nsg_name,
"new": network_security_group,
}
# virtual_machine changes
vm_name = None
if iface.get("virtual_machine"):
vm_name = iface["virtual_machine"]["id"].split("/")[-1]
if virtual_machine and (virtual_machine != vm_name):
ret["changes"]["virtual_machine"] = {"old": vm_name, "new": virtual_machine}
# dns_settings changes
if dns_settings:
if not isinstance(dns_settings, dict):
ret["comment"] = "DNS settings must be provided as a dictionary!"
return ret
for key in dns_settings:
if dns_settings[key].lower() != iface.get("dns_settings", {}).get(key, "").lower():
ret["changes"]["dns_settings"] = {
"old": iface.get("dns_settings"),
"new": dns_settings,
}
break
# ip_configurations changes
comp_ret = saltext.azurerm.utils.azurerm.compare_list_of_dicts(
iface.get("ip_configurations", []),
ip_configurations,
["public_ip_address", "subnet"],
)
if comp_ret.get("comment"):
ret["comment"] = '"ip_configurations" {}'.format(comp_ret["comment"])
return ret
if comp_ret.get("changes"):
ret["changes"]["ip_configurations"] = comp_ret["changes"]
if not ret["changes"]:
ret["result"] = True
ret["comment"] = "Network interface {} is already present.".format(name)
return ret
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Network interface {} would be updated.".format(name)
return ret
else:
ret["changes"] = {
"old": {},
"new": {
"name": name,
"ip_configurations": ip_configurations,
"dns_settings": dns_settings,
"network_security_group": network_security_group,
"virtual_machine": virtual_machine,
"enable_accelerated_networking": enable_accelerated_networking,
"enable_ip_forwarding": enable_ip_forwarding,
"mac_address": mac_address,
"primary": primary,
"tags": tags,
},
}
if __opts__["test"]:
ret["comment"] = "Network interface {} would be created.".format(name)
ret["result"] = None
return ret
iface_kwargs = kwargs.copy()
iface_kwargs.update(connection_auth)
iface = __salt__["azurerm_network.network_interface_create_or_update"](
name=name,
subnet=subnet,
virtual_network=virtual_network,
resource_group=resource_group,
ip_configurations=ip_configurations,
dns_settings=dns_settings,
enable_accelerated_networking=enable_accelerated_networking,
enable_ip_forwarding=enable_ip_forwarding,
mac_address=mac_address,
primary=primary,
network_security_group=network_security_group,
virtual_machine=virtual_machine,
tags=tags,
**iface_kwargs
)
if "error" not in iface:
ret["result"] = True
ret["comment"] = "Network interface {} has been created.".format(name)
return ret
ret["comment"] = "Failed to create network interface {}! ({})".format(name, iface.get("error"))
return ret
def network_interface_absent(name, resource_group, connection_auth=None):
"""
.. versionadded:: 2019.2.0
Ensure a network interface does not exist in the resource group.
:param name:
Name of the network interface.
:param resource_group:
The resource group assigned to the network interface.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
if not isinstance(connection_auth, dict):
ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
return ret
iface = __salt__["azurerm_network.network_interface_get"](
name, resource_group, azurerm_log_level="info", **connection_auth
)
if "error" in iface:
ret["result"] = True
ret["comment"] = "Network interface {} was not found.".format(name)
return ret
elif __opts__["test"]:
ret["comment"] = "Network interface {} would be deleted.".format(name)
ret["result"] = None
ret["changes"] = {
"old": iface,
"new": {},
}
return ret
deleted = __salt__["azurerm_network.network_interface_delete"](
name, resource_group, **connection_auth
)
if deleted:
ret["result"] = True
ret["comment"] = "Network interface {} has been deleted.".format(name)
ret["changes"] = {"old": iface, "new": {}}
return ret
ret["comment"] = "Failed to delete network interface {}!)".format(name)
return ret
def route_table_present(
name,
resource_group,
tags=None,
routes=None,
disable_bgp_route_propagation=None,
connection_auth=None,
**kwargs
):
"""
.. versionadded:: 2019.2.0
Ensure a route table exists.
:param name:
Name of the route table.
:param resource_group:
The resource group assigned to the route table.
:param routes:
An optional list of dictionaries representing valid Route objects contained within a route table. See the
documentation for the route_present state or route_create_or_update execution module for more information on
required and optional parameters for routes. The routes are only managed if this parameter is present. When this
parameter is absent, implemented routes will not be removed, and will merely become unmanaged.
:param disable_bgp_route_propagation:
An optional boolean parameter setting whether to disable the routes learned by BGP on the route table.
:param tags:
A dictionary of strings can be passed as tag metadata to the route table object.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure route table exists:
azurerm_network.route_table_present:
- name: rt1
- resource_group: group1
- routes:
- name: rt1_route1
address_prefix: '0.0.0.0/0'
next_hop_type: internet
- name: rt1_route2
address_prefix: '192.168.0.0/16'
next_hop_type: vnetlocal
- tags:
contact_name: Elmer Fudd Gantry
- connection_auth: {{ profile }}
- require:
- azurerm_resource: Ensure resource group exists
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
if not isinstance(connection_auth, dict):
ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
return ret
rt_tbl = __salt__["azurerm_network.route_table_get"](
name, resource_group, azurerm_log_level="info", **connection_auth
)
if "error" not in rt_tbl:
# tag changes
tag_changes = salt.utils.dictdiffer.deep_diff(rt_tbl.get("tags", {}), tags or {})
if tag_changes:
ret["changes"]["tags"] = tag_changes
# disable_bgp_route_propagation changes
# pylint: disable=line-too-long
if disable_bgp_route_propagation and (
disable_bgp_route_propagation != rt_tbl.get("disable_bgp_route_propagation")
):
ret["changes"]["disable_bgp_route_propagation"] = {
"old": rt_tbl.get("disable_bgp_route_propagation"),
"new": disable_bgp_route_propagation,
}
# routes changes
if routes:
comp_ret = saltext.azurerm.utils.azurerm.compare_list_of_dicts(
rt_tbl.get("routes", []), routes
)
if comp_ret.get("comment"):
ret["comment"] = '"routes" {}'.format(comp_ret["comment"])
return ret
if comp_ret.get("changes"):
ret["changes"]["routes"] = comp_ret["changes"]
if not ret["changes"]:
ret["result"] = True
ret["comment"] = "Route table {} is already present.".format(name)
return ret
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Route table {} would be updated.".format(name)
return ret
else:
ret["changes"] = {
"old": {},
"new": {
"name": name,
"tags": tags,
"routes": routes,
"disable_bgp_route_propagation": disable_bgp_route_propagation,
},
}
if __opts__["test"]:
ret["comment"] = "Route table {} would be created.".format(name)
ret["result"] = None
return ret
rt_tbl_kwargs = kwargs.copy()
rt_tbl_kwargs.update(connection_auth)
rt_tbl = __salt__["azurerm_network.route_table_create_or_update"](
name=name,
resource_group=resource_group,
disable_bgp_route_propagation=disable_bgp_route_propagation,
routes=routes,
tags=tags,
**rt_tbl_kwargs
)
if "error" not in rt_tbl:
ret["result"] = True
ret["comment"] = "Route table {} has been created.".format(name)
return ret
ret["comment"] = "Failed to create route table {}! ({})".format(name, rt_tbl.get("error"))
return ret
def route_table_absent(name, resource_group, connection_auth=None):
"""
.. versionadded:: 2019.2.0
Ensure a route table does not exist in the resource group.
:param name:
Name of the route table.
:param resource_group:
The resource group assigned to the route table.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
if not isinstance(connection_auth, dict):
ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
return ret
rt_tbl = __salt__["azurerm_network.route_table_get"](
name, resource_group, azurerm_log_level="info", **connection_auth
)
if "error" in rt_tbl:
ret["result"] = True
ret["comment"] = "Route table {} was not found.".format(name)
return ret
elif __opts__["test"]:
ret["comment"] = "Route table {} would be deleted.".format(name)
ret["result"] = None
ret["changes"] = {
"old": rt_tbl,
"new": {},
}
return ret
deleted = __salt__["azurerm_network.route_table_delete"](
name, resource_group, **connection_auth
)
if deleted:
ret["result"] = True
ret["comment"] = "Route table {} has been deleted.".format(name)
ret["changes"] = {"old": rt_tbl, "new": {}}
return ret
ret["comment"] = "Failed to delete route table {}!".format(name)
return ret
def route_present(
name,
address_prefix,
next_hop_type,
route_table,
resource_group,
next_hop_ip_address=None,
connection_auth=None,
**kwargs
):
"""
.. versionadded:: 2019.2.0
Ensure a route exists within a route table.
:param name:
Name of the route.
:param address_prefix:
The destination CIDR to which the route applies.
:param next_hop_type:
The type of Azure hop the packet should be sent to. Possible values are: 'VirtualNetworkGateway', 'VnetLocal',
'Internet', 'VirtualAppliance', and 'None'.
:param next_hop_ip_address:
The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop
type is 'VirtualAppliance'.
:param route_table:
The name of the existing route table which will contain the route.
:param resource_group:
The resource group assigned to the route table.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure route exists:
azurerm_network.route_present:
- name: rt1_route2
- route_table: rt1
- resource_group: group1
- address_prefix: '192.168.0.0/16'
- next_hop_type: vnetlocal
- connection_auth: {{ profile }}
- require:
- azurerm_network: Ensure route table exists
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
if not isinstance(connection_auth, dict):
ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
return ret
route = __salt__["azurerm_network.route_get"](
name, route_table, resource_group, azurerm_log_level="info", **connection_auth
)
if "error" not in route:
if address_prefix != route.get("address_prefix"):
ret["changes"]["address_prefix"] = {
"old": route.get("address_prefix"),
"new": address_prefix,
}
if next_hop_type.lower() != route.get("next_hop_type", "").lower():
ret["changes"]["next_hop_type"] = {
"old": route.get("next_hop_type"),
"new": next_hop_type,
}
if next_hop_type.lower() == "virtualappliance" and next_hop_ip_address != route.get(
"next_hop_ip_address"
):
ret["changes"]["next_hop_ip_address"] = {
"old": route.get("next_hop_ip_address"),
"new": next_hop_ip_address,
}
if not ret["changes"]:
ret["result"] = True
ret["comment"] = "Route {} is already present.".format(name)
return ret
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Route {} would be updated.".format(name)
return ret
else:
ret["changes"] = {
"old": {},
"new": {
"name": name,
"address_prefix": address_prefix,
"next_hop_type": next_hop_type,
"next_hop_ip_address": next_hop_ip_address,
},
}
if __opts__["test"]:
ret["comment"] = "Route {} would be created.".format(name)
ret["result"] = None
return ret
route_kwargs = kwargs.copy()
route_kwargs.update(connection_auth)
route = __salt__["azurerm_network.route_create_or_update"](
name=name,
route_table=route_table,
resource_group=resource_group,
address_prefix=address_prefix,
next_hop_type=next_hop_type,
next_hop_ip_address=next_hop_ip_address,
**route_kwargs
)
if "error" not in route:
ret["result"] = True
ret["comment"] = "Route {} has been created.".format(name)
return ret
ret["comment"] = "Failed to create route {}! ({})".format(name, route.get("error"))
return ret
def route_absent(name, route_table, resource_group, connection_auth=None):
"""
.. versionadded:: 2019.2.0
Ensure a route table does not exist in the resource group.
:param name:
Name of the route table.
:param route_table:
The name of the existing route table containing the route.
:param resource_group:
The resource group assigned to the route table.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
if not isinstance(connection_auth, dict):
ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
return ret
route = __salt__["azurerm_network.route_get"](
name, route_table, resource_group, azurerm_log_level="info", **connection_auth
)
if "error" in route:
ret["result"] = True
ret["comment"] = "Route {} was not found.".format(name)
return ret
elif __opts__["test"]:
ret["comment"] = "Route {} would be deleted.".format(name)
ret["result"] = None
ret["changes"] = {
"old": route,
"new": {},
}
return ret
deleted = __salt__["azurerm_network.route_delete"](
name, route_table, resource_group, **connection_auth
)
if deleted:
ret["result"] = True
ret["comment"] = "Route {} has been deleted.".format(name)
ret["changes"] = {"old": route, "new": {}}
return ret
ret["comment"] = "Failed to delete route {}!".format(name)
return ret | /saltext.azurerm-4.0.1.tar.gz/saltext.azurerm-4.0.1/src/saltext/azurerm/states/azurerm_network.py | 0.761272 | 0.166337 | azurerm_network.py | pypi |
import logging
import salt.utils.dictdiffer # pylint: disable=import-error
# Azure libs
HAS_LIBS = False
try:
from azure.mgmt.core.tools import parse_resource_id
HAS_LIBS = True
except ImportError:
pass
log = logging.getLogger(__name__)
def present(
name,
resource_group,
vm_size,
admin_username="salt",
os_disk_create_option="FromImage",
os_disk_size_gb=30,
ssh_public_keys=None,
disable_password_auth=None,
custom_data=None,
allow_extensions=None,
enable_automatic_updates=None,
time_zone=None,
allocate_public_ip=False,
create_interfaces=True,
network_resource_group=None,
virtual_network=None,
subnet=None,
network_interfaces=None,
os_managed_disk=None,
os_disk_vhd_uri=None,
os_disk_image_uri=None,
os_type=None,
os_disk_name=None,
os_disk_caching=None,
os_write_accel=None,
os_ephemeral_disk=None,
ultra_ssd_enabled=None,
image=None,
boot_diags_enabled=None,
diag_storage_uri=None,
admin_password=None,
force_admin_password=False,
max_price=None,
provision_vm_agent=True,
userdata_file=None,
userdata=None,
enable_disk_enc=False,
disk_enc_keyvault=None,
disk_enc_volume_type=None,
disk_enc_kek_url=None,
data_disks=None,
availability_set=None,
virtual_machine_scale_set=None,
proximity_placement_group=None,
host=None,
host_group=None,
extensions_time_budget=None,
tags=None,
connection_auth=None,
**kwargs,
):
"""
.. versionadded:: 2.1.0
Ensure a virtual machine exists.
:param name: The virtual machine to ensure is present.
:param resource_group: The resource group name assigned to the virtual machine.
:param vm_size: The size of the virtual machine.
:param admin_username: Specifies the name of the administrator account.
:param os_disk_create_option: (attach, from_image, or empty) Specifies how the virtual machine should be created.
The "attach" value is used when you are using a specialized disk to create the virtual machine. The "from_image"
value is used when you are using an image to create the virtual machine. If you are using a platform image, you
also use the image_reference element. If you are using a marketplace image, you also use the plan element.
:param os_disk_size_gb: Specifies the size of an empty OS disk in gigabytes. This element can be used to overwrite
the size of the disk in a virtual machine image.
:param ssh_public_keys: The list of SSH public keys used to authenticate with Linux based VMs.
:param disable_password_auth: (only on Linux) Specifies whether password authentication should be disabled when SSH
public keys are provided.
:param custom_data: (only on Linux) Specifies a base-64 encoded string of custom data for cloud-init (not user-data
scripts). The base-64 encoded string is decoded to a binary array that is saved as a file on the Virtual
Machine. The maximum length of the binary array is 65535 bytes. For using cloud-init for your VM, see `Using
cloud-init to customize a Linux VM during creation
<https://docs.microsoft.com/en-us/azure/virtual-machines/linux/using-cloud-init>`_
:param allow_extensions: Specifies whether extension operations should be allowed on the virtual machine. This may
only be set to False when no extensions are present on the virtual machine.
:param enable_automatic_updates: (only on Windows) Indicates whether automatic updates are enabled for the Windows
virtual machine. Default value is true. For virtual machine scale sets, this property can be updated and updates
will take effect on OS reprovisioning.
:param time_zone: (only on Windows) Specifies the time zone of the virtual machine. e.g. "Pacific Standard Time"
:param allocate_public_ip: Create and attach a public IP object to the VM.
:param create_interfaces: Create network interfaces to attach to the VM if none are provided.
:param network_resource_group: Specify the resource group of the network components referenced in this module.
:param virtual_network: Virtual network for the subnet which will contain the network interfaces.
:param subnet: Subnet to which the network interfaces will be attached.
:param network_interfaces: A list of network interface references ({"id": "/full/path/to/object"}) to attach.
:param os_managed_disk: A managed disk resource ID or dictionary containing the managed disk parameters. If a
dictionary is provided, "storage_account_type" can be passed in additional to the "id". Storage account type for
the managed disk can include: 'Standard_LRS', 'Premium_LRS', 'StandardSSD_LRS', 'UltraSSD_LRS'. NOTE:
UltraSSD_LRS can only be used with data disks.
:param os_disk_vhd_uri: The virtual hard disk for the OS ({"uri": "/full/path/to/object"}).
:param os_disk_image_uri: The source user image virtual hard disk ({"uri": "/full/path/to/object"}). The virtual
hard disk will be copied before being attached to the virtual machine. If SourceImage is provided, the
destination virtual hard drive must not exist.
:param os_type: (linux or windows) This property allows you to specify the type of the OS that is included in the
disk if creating a VM from user-image or a specialized VHD.
:param os_disk_name: The OS disk name.
:param os_disk_caching: (read_only, read_write, or none) Specifies the caching requirements. Defaults
to "None" for Standard storage and "ReadOnly" for Premium storage.
:param os_write_accel: Boolean value specifies whether write accelerator should be enabled or disabled on the disk.
:param os_ephemeral_disk: Boolean value to enable ephemeral "diff" OS disk. `Ephemeral OS disks
<https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks>`_ are created on the local
virtual machine (VM) storage and not saved to the remote Azure Storage.
:param ultra_ssd_enabled: The flag that enables or disables a capability to have one or more managed data disks with
UltraSSD_LRS storage account type on the VM or VMSS. Managed disks with storage account type UltraSSD_LRS can be
added to a virtual machine or virtual machine scale set only if this property is enabled.
:param image: A pipe-delimited representation of an image to use, in the format of "publisher|offer|sku|version".
Examples - "OpenLogic|CentOS|7.7|latest" or "Canonical|UbuntuServer|18.04-LTS|latest"
:param boot_diags_enabled: Enables boots diagnostics on the Virtual Machine. Required for use of the
diag_storage_uri parameter.
:param diag_storage_uri: Enables boots diagnostics on the Virtual Machine by passing the URI of the storage account
to use for placing the console output and screenshot.
:param admin_password: Specifies the password of the administrator account. Note that there are minimum length,
maximum length, and complexity requirements imposed on this password. See the Azure documentation for details.
:param force_admin_password: A Boolean flag that represents whether or not the admin password should be updated.
If it is set to True, then the admin password will be updated if the virtual machine already exists. If it is
set to False, then the password will not be updated unless other parameters also need to be updated.
Defaults to False.
:param provision_vm_agent: Indicates whether virtual machine agent should be provisioned on the virtual machine.
When this property is not specified in the request body, default behavior is to set it to true. This will ensure
that VM Agent is installed on the VM so that extensions can be added to the VM later. If attempting to set this
value, os_type should also be set in order to ensure the proper OS configuration is used.
:param userdata_file: This parameter can contain a local or web path for a userdata script. If a local file is used,
then the contents of that file will override the contents of the userdata parameter. If a web source is used,
then the userdata parameter should contain the command to execute the script file. For instance, if a file
location of https://raw.githubusercontent.com/saltstack/salt-bootstrap/stable/bootstrap-salt.sh is used then the
userdata parameter would contain "./bootstrap-salt.sh" along with any desired arguments. Note that PowerShell
execution policy may cause issues here. For PowerShell files, considered signed scripts or the more insecure
"powershell -ExecutionPolicy Unrestricted -File ./bootstrap-salt.ps1" addition to the command.
:param userdata: This parameter is used to pass text to be executed on a system. The native shell will be used on a
given host operating system.
:param max_price: Specifies the maximum price you are willing to pay for a Azure Spot VM/VMSS. This price is in US
Dollars. This price will be compared with the current Azure Spot price for the VM size. Also, the prices are
compared at the time of create/update of Azure Spot VM/VMSS and the operation will only succeed if max_price is
greater than the current Azure Spot price. The max_price will also be used for evicting a Azure Spot VM/VMSS if
the current Azure Spot price goes beyond the maxPrice after creation of VM/VMSS. Possible values are any decimal
value greater than zero (example: 0.01538) or -1 indicates default price to be up-to on-demand. You can set the
max_price to -1 to indicate that the Azure Spot VM/VMSS should not be evicted for price reasons. Also, the
default max price is -1 if it is not provided by you.
:param priority: (low or regular) Specifies the priority for the virtual machine.
:param eviction_policy: (deallocate or delete) Specifies the eviction policy for the Azure Spot virtual machine.
:param license_type: (Windows_Client or Windows_Server) Specifies that the image or disk that is being used was
licensed on-premises. This element is only used for images that contain the Windows Server operating system.
:param zones: A list of the virtual machine zones.
:param availability_set: The resource ID of the availability set that the virtual machine should be assigned to.
Virtual machines specified in the same availability set are allocated to different nodes to maximize
availability. For more information about availability sets, see `Manage the availability of virtual
machines <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability>`_.
Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an
availability set. This parameter cannot be specified if the ``virtual_machine_scale_set`` parameter is also
specified.
:param virtual_machine_scale_set: The resource ID of the virtual machine scale set that the virtual machine should
be assigned to. Virtual machines specified in the same virtual machine scale set are allocated to different
nodes to maximize availability. Currently, a VM can only be added to virtual machine scale set at creation time.
An existing VM cannot be added to a virtual machine scale set. This parameter cannot be specified if the
``availability_set`` parameter is also specified.
:param proximity_placement_group: The resource ID of the proximity placement group that the virtual machine should
be assigned to.
:param host: The resource ID of the dedicated host that the virtual machine resides in. This parameter cannot be
specified if the ``host_group`` parameter is also specified.
:param host_group: The resource ID of the dedicated host group that the virtual machine resides in. This
parameter cannot be specified if the ``host`` parameter is also specified.
:param extensions_time_budget: Specifies the time alloted for all extensions to start. The time duration should be
between 15 minutes and 120 minutes (inclusive) and should be specified in ISO 8601 format. The default value is
90 minutes (PT1H30M).
:param tags: A dictionary of strings can be passed as tag metadata to the virtual machine object.
:param connection_auth: A dictionary with subscription and authentication parameters to be used in connecting to
the Azure Resource Manager API.
Virtual Machine Disk Encryption:
If you would like to enable disk encryption within the virtual machine you must set the enable_disk_enc
parameter to True. Disk encryption utilizes a VM published by Microsoft.Azure.Security of extension type
AzureDiskEncryptionForLinux or AzureDiskEncryption, depending on your virtual machine OS. More information
about Disk Encryption and its requirements can be found in the links below.
Disk Encryption for Windows Virtual Machines:
https://docs.microsoft.com/en-us/azure/virtual-machines/windows/disk-encryption-overview
Disk Encryption for Linux Virtual Machines:
https://docs.microsoft.com/en-us/azure/virtual-machines/linux/disk-encryption-overview
The following parameters may be used to implement virtual machine disk encryption:
- **param enable_disk_enc**: This boolean flag will represent whether disk encryption has been enabled for the
virtual machine. This is a required parameter.
- **disk_enc_keyvault**: The resource ID of the key vault containing the disk encryption key, which is a
Key Vault Secret. This is a required parameter.
- **disk_enc_volume_type**: The volume type(s) that will be encrypted. Possible values include: 'OS',
'Data', and 'All'. This is a required parameter.
- **disk_enc_kek_url**: The Key Identifier URL for a Key Encryption Key (KEK). The KEK is used as an
additional layer of security for encryption keys. Azure Disk Encryption will use the KEK to wrap the
encryption secrets before writing to the Key Vault. The KEK must be in the same vault as the encryption
secrets. This is an optional parameter.
Attaching Data Disks:
Data disks can be attached by passing a list of dictionaries in the data_disks parameter. The dictionaries in
the list can have the following parameters:
- **lun**: (optional int) Specifies the logical unit number of the data disk. This value is used to identify
data disks within the VM and therefore must be unique for each data disk attached to a VM. If not
provided, we increment the lun designator based upon the index within the provided list of disks.
- **name**: (optional str) The disk name. Defaults to "{vm_name}-datadisk{lun}"
- **vhd**: (optional str or dict) Virtual hard disk to use. If a URI string is provided, it will be nested
under a "uri" key in a dictionary as expected by the SDK.
- **image**: (optional str or dict) The source user image virtual hard disk. The virtual hard disk will be
copied before being attached to the virtual machine. If image is provided, the destination virtual hard
drive must not exist. If a URI string is provided, it will be nested under a "uri" key in a dictionary as
expected by the SDK.
- **caching**: (optional str - read_only, read_write, or none) Specifies the caching requirements. Defaults to
"None" for Standard storage and "ReadOnly" for Premium storage.
- **write_accelerator_enabled**: (optional bool - True or False) Specifies whether write accelerator should be
enabled or disabled on the disk.
- **create_option**: (optional str - attach, from_image, or empty) Specifies how the virtual machine should be
created. The "attach" value is used when you are using a specialized disk to create the virtual machine. The
"from_image" value is used when you are using an image to create the virtual machine. If you are using a
platform image, you also use the image_reference element. If you are using a marketplace image, you also use
the plan element.
- **disk_size_gb**: (optional int) Specifies the size of an empty data disk in gigabytes. This element can be
used to overwrite the size of the disk in a virtual machine image.
- **managed_disk**: (optional str or dict) The managed disk parameters. If an ID string is provided, it will
be nested under an "id" key in a dictionary as expected by the SDK. If a dictionary is provided, the
"storage_account_type" parameter can be passed (accepts (Standard|Premium)_LRS or (Standard|Ultra)SSD_LRS).
Example usage:
.. code-block:: yaml
Ensure virtual machine exists:
azurerm_compute_virtual_machine.present:
- name: salt-vm01
- resource_group: salt-rg01
- vm_size: Standard_B1s
- virtual_network: vnet1
- subnet: default
- allocate_public_ip: True
- ssh_public_keys:
- /home/myuser/.ssh/id_rsa.pub
- tags:
contact_name: Elmer Fudd Gantry
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
action = "create"
if not isinstance(connection_auth, dict):
ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
return ret
virt_mach = __salt__["azurerm_compute_virtual_machine.get"](
name, resource_group, azurerm_log_level="info", **connection_auth
)
if "error" not in virt_mach:
action = "update"
tag_changes = salt.utils.dictdiffer.deep_diff(virt_mach.get("tags", {}), tags or {})
if tag_changes:
ret["changes"]["tags"] = tag_changes
if vm_size.lower() != virt_mach["hardware_profile"]["vm_size"].lower():
ret["changes"]["vm_size"] = {
"old": virt_mach["hardware_profile"]["vm_size"].lower(),
"new": vm_size.lower(),
}
if boot_diags_enabled is not None:
if boot_diags_enabled != virt_mach.get("diagnostics_profile", {}).get(
"boot_diagnostics", {}
).get("enabled", False):
ret["changes"]["boot_diags_enabled"] = {
"old": virt_mach.get("diagnostics_profile", {})
.get("boot_diagnostics", {})
.get("enabled", False),
"new": boot_diags_enabled,
}
if diag_storage_uri:
if diag_storage_uri != virt_mach.get("diagnostics_profile", {}).get(
"boot_diagnostics", {}
).get("storage_uri"):
ret["changes"]["diag_storage_uri"] = {
"old": virt_mach.get("diagnostics_profile", {})
.get("boot_diagnostics", {})
.get("storage_uri"),
"new": diag_storage_uri,
}
if max_price:
if max_price != virt_mach.get("billing_profile", {}).get("max_price"):
ret["changes"]["max_price"] = {
"old": virt_mach.get("billing_profile", {}).get("max_price"),
"new": max_price,
}
if allow_extensions is not None:
if allow_extensions != virt_mach.get("os_profile", {}).get(
"allow_extension_operations", True
):
ret["changes"]["allow_extensions"] = {
"old": virt_mach.get("os_profile", {}).get("allow_extension_operations", True),
"new": allow_extensions,
}
if os_write_accel is not None:
if os_write_accel != virt_mach.get("storage_profile", {}).get("os_disk", {}).get(
"write_accelerator_enabled"
):
ret["changes"]["os_write_accel"] = {
"old": virt_mach.get("storage_profile", {})
.get("os_disk", {})
.get("write_accelerator_enabled"),
"new": os_write_accel,
}
if os_disk_caching is not None:
if os_disk_caching != virt_mach.get("storage_profile", {}).get("os_disk", {}).get(
"caching"
):
ret["changes"]["os_disk_caching"] = {
"old": virt_mach.get("storage_profile", {}).get("os_disk", {}).get("caching"),
"new": os_disk_caching,
}
if ultra_ssd_enabled is not None:
if ultra_ssd_enabled != virt_mach.get("additional_capabilities", {}).get(
"ultra_ssd_enabled"
):
ret["changes"]["ultra_ssd_enabled"] = {
"old": virt_mach.get("additional_capabilities", {}).get("ultra_ssd_enabled"),
"new": ultra_ssd_enabled,
}
if provision_vm_agent is not None:
if virt_mach.get("os_profile", {}).get("linux_configuration", {}):
if provision_vm_agent != virt_mach["os_profile"]["linux_configuration"].get(
"provision_vm_agent", True
):
ret["changes"]["provision_vm_agent"] = {
"old": virt_mach["os_profile"]["linux_configuration"].get(
"provision_vm_agent", True
),
"new": provision_vm_agent,
}
if virt_mach.get("os_profile", {}).get("windows_configuration", {}):
if provision_vm_agent != virt_mach["os_profile"]["windows_configuration"].get(
"provision_vm_agent", True
):
ret["changes"]["provision_vm_agent"] = {
"old": virt_mach["os_profile"]["windows_configuration"].get(
"provision_vm_agent", True
),
"new": provision_vm_agent,
}
if time_zone:
if time_zone != virt_mach.get("os_profile", {}).get("windows_configuration", {}).get(
"time_zone", True
):
ret["changes"]["time_zone"] = {
"old": virt_mach.get("os_profile", {})
.get("windows_configuration", {})
.get("time_zone", True),
"new": time_zone,
}
if enable_automatic_updates is not None:
if enable_automatic_updates != virt_mach.get("os_profile", {}).get(
"windows_configuration", {}
).get("enable_automatic_updates", True):
ret["changes"]["enable_automatic_updates"] = {
"old": virt_mach.get("os_profile", {})
.get("windows_configuration", {})
.get("enable_automatic_updates", True),
"new": enable_automatic_updates,
}
if data_disks is not None:
existing_disks = virt_mach.get("storage_profile", {}).get("data_disks", [])
if len(existing_disks) != len(data_disks):
ret["changes"]["data_disks"] = {
"old": existing_disks,
"new": data_disks,
}
else:
for idx, disk in enumerate(data_disks):
for key in disk:
if isinstance(disk[key], dict) and isinstance(
existing_disks[idx].get(key), dict
):
for k in disk[key]:
if disk[key][k] != existing_disks[idx][key].get(k):
ret["changes"]["data_disks"] = {
"old": existing_disks,
"new": data_disks,
}
else:
if disk[key] != existing_disks[idx].get(key):
ret["changes"]["data_disks"] = {
"old": existing_disks,
"new": data_disks,
}
if enable_disk_enc:
extensions = virt_mach.get("resources", [])
disk_enc_exists = False
for extension in extensions:
if (
extension.get("virtual_machine_extension_type") == "AzureDiskEncryptionForLinux"
or extension.get("virtual_machine_extension_type") == "AzureDiskEncryption"
):
disk_enc_exists = True
break
if not disk_enc_exists:
ret["changes"]["enable_disk_enc"] = {"old": False, "new": True}
if disk_enc_keyvault:
ret["changes"]["disk_enc_keyvault"] = {"new": disk_enc_keyvault}
if disk_enc_volume_type:
ret["changes"]["disk_enc_volume_type"] = {"new": disk_enc_volume_type}
if disk_enc_kek_url:
ret["changes"]["disk_enc_kek_url"] = {"new": disk_enc_kek_url}
if admin_password and force_admin_password:
ret["changes"]["admin_password"] = {"new": "REDACTED"}
elif ret["changes"]:
ret["changes"]["admin_password"] = {"new": "REDACTED"}
if not ret["changes"]:
ret["result"] = True
ret["comment"] = "Virtual machine {} is already present.".format(name)
return ret
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Virtual machine {} would be updated.".format(name)
return ret
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Virtual machine {} would be created.".format(name)
return ret
vm_kwargs = kwargs.copy()
vm_kwargs.update(connection_auth)
virt_mach = __salt__["azurerm_compute_virtual_machine.create_or_update"](
name=name,
resource_group=resource_group,
vm_size=vm_size,
admin_username=admin_username,
os_disk_create_option=os_disk_create_option,
os_disk_size_gb=os_disk_size_gb,
ssh_public_keys=ssh_public_keys,
disable_password_auth=disable_password_auth,
custom_data=custom_data,
allow_extensions=allow_extensions,
enable_automatic_updates=enable_automatic_updates,
time_zone=time_zone,
allocate_public_ip=allocate_public_ip,
create_interfaces=create_interfaces,
network_resource_group=network_resource_group,
virtual_network=virtual_network,
subnet=subnet,
network_interfaces=network_interfaces,
os_managed_disk=os_managed_disk,
os_disk_vhd_uri=os_disk_vhd_uri,
os_disk_image_uri=os_disk_image_uri,
os_type=os_type,
os_disk_name=os_disk_name,
os_disk_caching=os_disk_caching,
os_write_accel=os_write_accel,
os_ephemeral_disk=os_ephemeral_disk,
ultra_ssd_enabled=ultra_ssd_enabled,
image=image,
boot_diags_enabled=boot_diags_enabled,
diag_storage_uri=diag_storage_uri,
admin_password=admin_password,
max_price=max_price,
provision_vm_agent=provision_vm_agent,
userdata_file=userdata_file,
userdata=userdata,
enable_disk_enc=enable_disk_enc,
disk_enc_keyvault=disk_enc_keyvault,
disk_enc_volume_type=disk_enc_volume_type,
disk_enc_kek_url=disk_enc_kek_url,
data_disks=data_disks,
availability_set=availability_set,
virtual_machine_scale_set=virtual_machine_scale_set,
proximity_placement_group=proximity_placement_group,
host=host,
host_group=host_group,
extensions_time_budget=extensions_time_budget,
tags=tags,
**vm_kwargs,
)
if action == "create":
ret["changes"] = {"old": {}, "new": virt_mach}
if "error" not in virt_mach:
ret["result"] = True
ret["comment"] = f"Virtual machine {name} has been {action}d."
return ret
ret["comment"] = "Failed to {} virtual machine {}! ({})".format(
action, name, virt_mach.get("error")
)
if not ret["result"]:
ret["changes"] = {}
return ret
def absent(
    name,
    resource_group,
    cleanup_osdisks=False,
    cleanup_datadisks=False,
    cleanup_interfaces=False,
    cleanup_public_ips=False,
    connection_auth=None,
):
    """
    .. versionadded:: 2.1.0

    Ensure a virtual machine does not exist in a resource group.

    :param name:
        Name of the virtual machine.

    :param resource_group:
        Name of the resource group containing the virtual machine.

    :param cleanup_osdisks:
        Enable deletion of the operating system disk attached to the virtual machine.

    :param cleanup_datadisks:
        Enable deletion of ALL of the data disks attached to the virtual machine.

    :param cleanup_interfaces:
        Enable deletion of ALL of the network interfaces attached to the virtual machine.

    :param cleanup_public_ips:
        Enable deletion of ALL of the public IP addresses directly attached to the virtual machine.
        Only honored when ``cleanup_interfaces`` is also enabled, since public IPs are discovered
        via the IP configurations of the attached network interfaces.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure virtual machine absent:
            azurerm_compute_virtual_machine.absent:
                - name: test_machine
                - resource_group: test_group
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    if not isinstance(connection_auth, dict):
        ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
        return ret

    virt_mach = __salt__["azurerm_compute_virtual_machine.get"](
        name, resource_group, azurerm_log_level="info", **connection_auth
    )

    # An "error" key means the VM could not be retrieved, which we treat as already absent.
    if "error" in virt_mach:
        ret["result"] = True
        ret["comment"] = "Virtual machine {} was not found.".format(name)
        return ret

    if __opts__["test"]:
        ret["comment"] = "Virtual machine {} would be deleted.".format(name)
        ret["result"] = None
        ret["changes"] = {
            "old": virt_mach,
            "new": {},
        }
        return ret

    deleted = __salt__["azurerm_compute_virtual_machine.delete"](
        name, resource_group, **connection_auth
    )

    if deleted:
        if cleanup_osdisks:
            virt_mach["cleanup_osdisks"] = True
            os_disk = virt_mach["storage_profile"]["os_disk"]
            # Only managed disks carry a resource ID we can delete; VHD-backed OS disks are skipped.
            if os_disk.get("managed_disk", {}).get("id"):
                disk_link = os_disk["managed_disk"]["id"]
                try:
                    disk_dict = parse_resource_id(disk_link)
                    disk_name = disk_dict["name"]
                    disk_group = disk_dict["resource_group"]
                except KeyError:
                    log.error("This isn't a valid disk resource: %s", os_disk)
                else:
                    # Guarded by the else clause so we never attempt deletion with
                    # unbound disk_name/disk_group after a failed resource ID parse.
                    deleted_disk = __salt__["azurerm_compute_disk.delete"](
                        disk_name,
                        disk_group,
                        azurerm_log_level="info",
                        **connection_auth,
                    )
                    if not deleted_disk:
                        log.error("Unable to delete disk: %s", disk_link)

        if cleanup_datadisks:
            virt_mach["cleanup_datadisks"] = True
            for disk in virt_mach["storage_profile"].get("data_disks", []):
                if disk.get("managed_disk", {}).get("id"):
                    disk_link = disk["managed_disk"]["id"]
                    try:
                        disk_dict = parse_resource_id(disk_link)
                        disk_name = disk_dict["name"]
                        disk_group = disk_dict["resource_group"]
                    except KeyError:
                        # Log the data disk being processed (was incorrectly logging
                        # os_disk, which is unbound when cleanup_osdisks is False).
                        log.error("This isn't a valid disk resource: %s", disk)
                        continue

                    deleted_disk = __salt__["azurerm_compute_disk.delete"](
                        disk_name,
                        disk_group,
                        azurerm_log_level="info",
                        **connection_auth,
                    )
                    if not deleted_disk:
                        log.error("Unable to delete disk: %s", disk_link)

        if cleanup_interfaces:
            virt_mach["cleanup_interfaces"] = True
            for nic_link in virt_mach.get("network_profile", {}).get("network_interfaces", []):
                try:
                    nic_dict = parse_resource_id(nic_link["id"])
                    nic_name = nic_dict["name"]
                    nic_group = nic_dict["resource_group"]
                except KeyError:
                    log.error("This isn't a valid network interface subresource: %s", nic_link)
                    continue

                # Fetch the interface BEFORE deleting it so its IP configurations
                # (and any attached public IPs) are still available for cleanup below.
                nic = __salt__["azurerm_network.network_interface_get"](
                    nic_name,
                    nic_group,
                    azurerm_log_level="info",
                    **connection_auth,
                )

                # pylint: disable=unused-variable
                deleted_nic = __salt__["azurerm_network.network_interface_delete"](
                    nic_name,
                    nic_group,
                    azurerm_log_level="info",
                    **connection_auth,
                )

                if cleanup_public_ips:
                    virt_mach["cleanup_public_ips"] = True
                    for ipc in nic.get("ip_configurations", []):
                        if "public_ip_address" not in ipc:
                            continue

                        try:
                            pip_dict = parse_resource_id(ipc["public_ip_address"]["id"])
                            pip_name = pip_dict["name"]
                            pip_group = pip_dict["resource_group"]
                        except KeyError:
                            log.error(
                                "This isn't a valid public IP subresource: %s",
                                ipc.get("public_ip_address"),
                            )
                            continue

                        # pylint: disable=unused-variable
                        deleted_pip = __salt__["azurerm_network.public_ip_address_delete"](
                            pip_name,
                            pip_group,
                            azurerm_log_level="info",
                            **connection_auth,
                        )

        ret["result"] = True
        ret["comment"] = "Virtual machine {} has been deleted.".format(name)
        ret["changes"] = {"old": virt_mach, "new": {}}
        return ret

    ret["comment"] = "Failed to delete virtual machine {}!".format(name)
    return ret
import logging
from operator import itemgetter
import salt.utils.dictdiffer # pylint: disable=import-error
log = logging.getLogger(__name__)
def present(
    name,
    resource_group,
    location,
    tenant_id,
    sku,
    access_policies=None,
    vault_uri=None,
    create_mode=None,
    enabled_for_deployment=None,
    enabled_for_disk_encryption=None,
    enabled_for_template_deployment=None,
    enable_soft_delete=None,
    soft_delete_retention=None,
    enable_purge_protection=None,
    enable_rbac_authorization=None,
    network_acls=None,
    tags=None,
    connection_auth=None,
    **kwargs,
):
    """
    .. versionadded:: 2.1.0
    Ensure a specified keyvault exists.
    :param name: The name of the vault.
    :param resource_group: The name of the resource group to which the vault belongs.
    :param location: The supported Azure location where the key vault should be created.
    :param tenant_id: The Azure Active Directory tenant ID that should be used for authenticating requests to
        the key vault.
    :param sku: The SKU name to specify whether the key vault is a standard vault or a premium vault. Possible
        values include: 'standard' and 'premium'.
    :param access_policies: A list of 0 to 16 dictionaries that represent AccessPolicyEntry objects. The
        AccessPolicyEntry objects represent identities that have access to the key vault. All identities in the
        list must use the same tenant ID as the key vault's tenant ID. When createMode is set to "recover", access
        policies are not required. Otherwise, access policies are required. Valid parameters are:
        - ``tenant_id``: (Required) The Azure Active Directory tenant ID that should be used for authenticating
          requests to the key vault.
        - ``object_id``: (Required) The object ID of a user, service principal, or security group in the Azure Active
          Directory tenant for the vault. The object ID must be unique for the list of access policies.
        - ``application_id``: (Optional) Application ID of the client making request on behalf of a principal.
        - ``permissions``: (Required) A dictionary representing permissions the identity has for keys, secrets, and
          certifications. Valid parameters include:
          - ``keys``: A list that represents permissions to keys. Possible values include: 'backup', 'create',
            'decrypt', 'delete', 'encrypt', 'get', 'import_enum', 'list', 'purge', 'recover', 'restore', 'sign',
            'unwrap_key', 'update', 'verify', and 'wrap_key'.
          - ``secrets``: A list that represents permissions to secrets. Possible values include: 'backup', 'delete',
            'get', 'list', 'purge', 'recover', 'restore', and 'set'.
          - ``certificates``: A list that represents permissions to certificates. Possible values include: 'create',
            'delete', 'deleteissuers', 'get', 'getissuers', 'import_enum', 'list', 'listissuers', 'managecontacts',
            'manageissuers', 'purge', 'recover', 'setissuers', and 'update'.
          - ``storage``: A list that represents permissions to storage accounts. Possible values include: 'backup',
            'delete', 'deletesas', 'get', 'getsas', 'list', 'listsas', 'purge', 'recover', 'regeneratekey',
            'restore', 'set', 'setsas', and 'update'.
    :param vault_uri: The URI of the vault for performing operations on keys and secrets.
    :param create_mode: The vault's create mode to indicate whether the vault needs to be recovered or not.
        Possible values include: 'recover' and 'default'.
    :param enabled_for_deployment: A boolean value specifying whether Azure Virtual Machines are permitted to
        retrieve certificates stored as secrets from the key vault.
    :param enabled_for_disk_encryption: A boolean value specifying whether Azure Disk Encrpytion is permitted
        to retrieve secrets from the vault and unwrap keys.
    :param enabled_for_template_deployment: A boolean value specifying whether Azure Resource Manager is
        permitted to retrieve secrets from the key vault.
    :param enable_soft_delete: A boolean value that specifies whether the 'soft delete' functionality is
        enabled for this key vault. If it's not set to any value (True or False) when creating new key vault, it will
        be set to True by default. Once set to True, it cannot be reverted to False.
    :param soft_delete_retention: The soft delete data retention period in days. It accepts values between
        7-90, inclusive. Default value is 90.
    :param enable_purge_protection: A boolean value specifying whether protection against purge is enabled for this
        vault. Setting this property to True activates protection against purge for this vault and its content - only
        the Key Vault service may initiate a hard, irrecoverable deletion. Enabling this functionality is irreversible,
        that is, the property does not accept False as its value. This is only effective if soft delete has been
        enabled via the ``enable_soft_delete`` parameter.
    :param enable_rbac_authorization: A boolean value that controls how data actions are authorized. When set to True,
        the key vault will use Role Based Access Control (RBAC) for authorization of data actions, and the access
        policies specified in vault properties will be ignored (warning: this is a preview feature). When set as
        False, the key vault will use the access policies specified in vault properties, and any policy stored on Azure
        Resource Manager will be ignored. Note that management actions are always authorized with RBAC. Defaults
        to False.
    :param network_acls: A dictionary representing a NetworkRuleSet. Rules governing the accessibility of
        the key vault from specific network locations.
    :param tags: The tags that will be assigned to the key vault.
    :param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    Example usage:
    .. code-block:: yaml
        Ensure key vault exists:
            azurerm_keyvault_vault.present:
                - name: my_vault
                - resource_group: my_rg
                - location: my_location
                - tenant_id: my_tenant
                - sku: my_sku
                - access_policies:
                  - tenant_id: my_tenant
                    object_id: my_object
                    permissions:
                      keys:
                        - perm1
                        - perm2
                        - perm3
                      secrets:
                        - perm1
                        - perm2
                        - perm3
                      certificates:
                        - perm1
                        - perm2
                        - perm3
                - tags:
                    contact_name: Elmer Fudd Gantry
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    action = "create"
    # Authentication details are required for every remote call below.
    if not isinstance(connection_auth, dict):
        ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
        return ret
    # Look up the existing vault (if any) to decide between create and update.
    vault = __salt__["azurerm_keyvault_vault.get"](
        name, resource_group, azurerm_log_level="info", **connection_auth
    )
    if "error" not in vault:
        # The vault exists -- compute the delta between desired and actual state.
        action = "update"
        ret["changes"]["properties"] = {}
        tag_changes = salt.utils.dictdiffer.deep_diff(vault.get("tags", {}), tags or {})
        if tag_changes:
            ret["changes"]["tags"] = tag_changes
        # Checks for changes in the account_policies parameter.
        # When desired and existing policy lists have the same length, they are
        # compared element-by-element after sorting both by object_id; any length
        # difference is unconditionally reported as a change (see the else branch).
        if len(access_policies or []) == len(vault.get("properties").get("access_policies", [])):
            new_policies_sorted = sorted(access_policies or [], key=itemgetter("object_id"))
            old_policies_sorted = sorted(
                vault.get("properties").get("access_policies", []),
                key=itemgetter("object_id"),
            )
            changed = False
            for index, new_policy in enumerate(new_policies_sorted):
                old_policy = old_policies_sorted[index]
                # Checks for changes with the tenant_id key
                if old_policy.get("tenant_id") != new_policy.get("tenant_id"):
                    changed = True
                    break
                # Checks for changes with the object_id key
                if old_policy.get("object_id") != new_policy.get("object_id"):
                    changed = True
                    break
                # Checks for changes with the application_id key
                if old_policy.get("application_id") != new_policy.get("application_id"):
                    changed = True
                    break
                # Checks for changes within the permissions key; permission lists are
                # compared order-insensitively by sorting both sides.
                if new_policy["permissions"].get("keys") is not None:
                    new_keys = sorted(new_policy["permissions"].get("keys"))
                    old_keys = sorted(old_policy["permissions"].get("keys", []))
                    if new_keys != old_keys:
                        changed = True
                        break
                # Checks for changes within the secrets key
                if new_policy["permissions"].get("secrets") is not None:
                    new_secrets = sorted(new_policy["permissions"].get("secrets"))
                    old_secrets = sorted(old_policy["permissions"].get("secrets", []))
                    if new_secrets != old_secrets:
                        changed = True
                        break
                # Checks for changes within the certificates key
                if new_policy["permissions"].get("certificates") is not None:
                    new_certificates = sorted(new_policy["permissions"].get("certificates"))
                    old_certificates = sorted(old_policy["permissions"].get("certificates", []))
                    if new_certificates != old_certificates:
                        changed = True
                        break
            if changed:
                ret["changes"]["properties"]["access_policies"] = {
                    "old": vault.get("properties").get("access_policies", []),
                    "new": access_policies,
                }
        else:
            # Different number of policies: always report as a change.
            ret["changes"]["properties"]["access_policies"] = {
                "old": vault.get("properties").get("access_policies", []),
                "new": access_policies,
            }
        if sku != vault.get("properties").get("sku").get("name"):
            ret["changes"]["properties"]["sku"] = {
                "old": vault.get("properties").get("sku"),
                "new": {"name": sku},
            }
        # The flag parameters below are only evaluated when explicitly supplied,
        # so omitted parameters never register as changes.
        if enabled_for_deployment is not None:
            if enabled_for_deployment != vault.get("properties").get("enabled_for_deployment"):
                ret["changes"]["properties"]["enabled_for_deployment"] = {
                    "old": vault.get("properties").get("enabled_for_deployment"),
                    "new": enabled_for_deployment,
                }
        if enabled_for_disk_encryption is not None:
            if enabled_for_disk_encryption != vault.get("properties").get(
                "enabled_for_disk_encryption"
            ):
                ret["changes"]["properties"]["enabled_for_disk_encryption"] = {
                    "old": vault.get("properties").get("enabled_for_disk_encryption"),
                    "new": enabled_for_disk_encryption,
                }
        if enabled_for_template_deployment is not None:
            if enabled_for_template_deployment != vault.get("properties").get(
                "enabled_for_template_deployment"
            ):
                ret["changes"]["properties"]["enabled_for_template_deployment"] = {
                    "old": vault.get("properties").get("enabled_for_template_deployment"),
                    "new": enabled_for_template_deployment,
                }
        if enable_soft_delete is not None:
            if enable_soft_delete != vault.get("properties").get("enable_soft_delete"):
                ret["changes"]["properties"]["enable_soft_delete"] = {
                    "old": vault.get("properties").get("enable_soft_delete"),
                    "new": enable_soft_delete,
                }
        if enable_purge_protection is not None:
            if enable_purge_protection != vault.get("properties").get("enable_purge_protection"):
                ret["changes"]["properties"]["enable_purge_protection"] = {
                    "old": vault.get("properties").get("enable_purge_protection"),
                    "new": enable_purge_protection,
                }
        if enable_rbac_authorization is not None:
            if enable_rbac_authorization != vault.get("properties").get(
                "enable_rbac_authorization"
            ):
                ret["changes"]["properties"]["enable_rbac_authorization"] = {
                    "old": vault.get("properties").get("enable_rbac_authorization"),
                    "new": enable_rbac_authorization,
                }
        # NOTE(review): soft_delete_retention drift is not detected here -- it is only
        # applied on create/update calls below. Confirm whether that is intentional.
        if network_acls:
            acls_changes = salt.utils.dictdiffer.deep_diff(
                vault.get("properties").get("network_acls", {}), network_acls or {}
            )
            if acls_changes:
                ret["changes"]["properties"]["network_acls"] = acls_changes
        # Drop the (possibly empty) properties sub-dict so "no changes" is detected.
        if not ret["changes"]["properties"]:
            del ret["changes"]["properties"]
        if not ret["changes"]:
            ret["result"] = True
            ret["comment"] = "Key Vault {} is already present.".format(name)
            return ret
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Key Vault {} would be updated.".format(name)
            return ret
    # Create path (or update path with pending changes past this point).
    if __opts__["test"]:
        ret["comment"] = "Key vault {} would be created.".format(name)
        ret["result"] = None
        return ret
    vault_kwargs = kwargs.copy()
    vault_kwargs.update(connection_auth)
    vault = __salt__["azurerm_keyvault_vault.create_or_update"](
        name=name,
        resource_group=resource_group,
        location=location,
        tenant_id=tenant_id,
        sku=sku,
        access_policies=access_policies,
        vault_uri=vault_uri,
        create_mode=create_mode,
        enable_soft_delete=enable_soft_delete,
        enable_purge_protection=enable_purge_protection,
        soft_delete_retention=soft_delete_retention,
        enabled_for_deployment=enabled_for_deployment,
        enabled_for_disk_encryption=enabled_for_disk_encryption,
        enabled_for_template_deployment=enabled_for_template_deployment,
        enable_rbac_authorization=enable_rbac_authorization,
        network_acls=network_acls,
        tags=tags,
        **vault_kwargs,
    )
    if action == "create":
        ret["changes"] = {"old": {}, "new": vault}
    if "error" not in vault:
        ret["result"] = True
        ret["comment"] = f"Key Vault {name} has been {action}d."
        return ret
    ret["comment"] = "Failed to {} Key Vault {}! ({})".format(action, name, vault.get("error"))
    # On failure, never report changes that were not applied.
    if not ret["result"]:
        ret["changes"] = {}
    return ret
def absent(name, resource_group, connection_auth=None):
    """
    .. versionadded:: 2.1.0
    Ensure a specified key vault does not exist.
    :param name: The name of the vault.
    :param resource_group: The name of the resource group to which the vault belongs.
    :param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    Example usage:
    .. code-block:: yaml
        Ensure key vault is absent:
            azurerm_keyvault_vault.absent:
                - name: my_vault
                - resource_group: my_rg
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    # Authentication details are mandatory for every remote call.
    if not isinstance(connection_auth, dict):
        ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
        return ret
    existing = __salt__["azurerm_keyvault_vault.get"](
        name, resource_group, azurerm_log_level="info", **connection_auth
    )
    # Already absent -- nothing to do.
    if "error" in existing:
        ret["result"] = True
        ret["comment"] = f"Key Vault {name} was not found."
        return ret
    # Dry run: report what would happen without calling out to Azure.
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = f"Key Vault {name} would be deleted."
        ret["changes"] = {"old": existing, "new": {}}
        return ret
    if __salt__["azurerm_keyvault_vault.delete"](name, resource_group, **connection_auth):
        ret["result"] = True
        ret["comment"] = f"Key Vault {name} has been deleted."
        ret["changes"] = {"old": existing, "new": {}}
        return ret
    ret["comment"] = f"Failed to delete Key Vault {name}!"
    return ret
import logging
import salt.utils.dictdiffer # pylint: disable=import-error
log = logging.getLogger(__name__)
def present(
    name,
    key_type,
    vault_url,
    key_operations=None,
    size=None,
    curve=None,
    hardware_protected=None,
    enabled=None,
    expires_on=None,
    not_before=None,
    tags=None,
    connection_auth=None,
    **kwargs,
):
    """
    .. versionadded:: 2.1.0
    Ensure the specified key exists within the given key vault. Requires keys/create permission. Key properties can be
    specified as keyword arguments.
    :param name: The name of the new key. Key names can only contain alphanumeric characters and dashes.
    :param key_type: The type of key to create. Possible values include: 'ec', 'ec_hsm', 'oct', 'rsa', 'rsa_hsm'.
    :param vault_url: The URL of the vault that the client will access.
    :param key_operations: A list of permitted key operations. Possible values include: 'decrypt', 'encrypt',
        'sign', 'unwrap_key', 'verify', 'wrap_key'.
    :param size: RSA key size in bits, for example 2048, 3072, or 4096. Applies to RSA keys only.
    :param curve: Elliptic curve name. Defaults to the NIST P-256 elliptic curve. Possible values include:
        "P-256", "P-256K", "P-384", "P-521".
    :param hardware_protected: A boolean value specifying whether the key should be created in a hardware
        security module (HSM).
    :param enabled: Whether the key is enabled for use.
    :param expires_on: When the key will expire, in UTC. This parameter should be a string representation
        of a Datetime object in ISO-8601 format.
    :param not_before: The time before which the key can not be used, in UTC. This parameter should be a
        string representation of a Datetime object in ISO-8601 format.
    :param tags: Application specific metadata in the form of key-value pairs.
    :param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    Example usage:
    .. code-block:: yaml
        Ensure key exists:
            azurerm_keyvault_key.present:
                - name: my_key
                - key_type: my_type
                - vault_url: my_vault
                - tags:
                    contact_name: Elmer Fudd Gantry
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    action = "create"
    # Authentication details are required for every remote call below.
    if not isinstance(connection_auth, dict):
        ret[
            "comment"
        ] = "Connection information must be specified via acct or connection_auth dictionary!"
        return ret
    key = __salt__["azurerm_keyvault_key.get_key"](
        name=name,
        vault_url=vault_url,
        azurerm_log_level="info",
        **connection_auth,
    )
    # Normalize short key type names to the identifiers used by the Azure SDK
    # (e.g. "rsa_hsm" -> "RSA-HSM"); "oct" is the only type that stays lower case.
    if key_type != "oct":
        key_type = key_type.upper().replace("_", "-")
    if "error" not in key:
        # The key exists -- compute the delta between desired and actual state.
        action = "update"
        if tags:
            tag_changes = salt.utils.dictdiffer.deep_diff(
                key.get("properties", {}).get("tags", {}) or {}, tags or {}
            )
            if tag_changes:
                ret["changes"]["tags"] = tag_changes
        if isinstance(key_operations, list):
            # Order is irrelevant for permitted operations; compare sorted copies.
            if sorted(key_operations) != sorted(key.get("key_operations", [])):
                ret["changes"]["key_operations"] = {
                    "old": key.get("key_operations"),
                    "new": key_operations,
                }
        if enabled is not None:
            if enabled != key.get("properties", {}).get("enabled"):
                ret["changes"]["enabled"] = {
                    "old": key.get("properties", {}).get("enabled"),
                    "new": enabled,
                }
        if hardware_protected is not None:
            # BUGFIX: this check previously compared ``enabled`` against the stored
            # hardware_protected value, so drift in this flag was never detected
            # correctly.
            if hardware_protected != key.get("properties", {}).get("hardware_protected"):
                ret["changes"]["hardware_protected"] = {
                    "old": key.get("properties", {}).get("hardware_protected"),
                    "new": hardware_protected,
                }
        if expires_on:
            if expires_on != key.get("properties", {}).get("expires_on"):
                ret["changes"]["expires_on"] = {
                    "old": key.get("properties", {}).get("expires_on"),
                    "new": expires_on,
                }
        if not_before:
            if not_before != key.get("properties", {}).get("not_before"):
                ret["changes"]["not_before"] = {
                    "old": key.get("properties", {}).get("not_before"),
                    "new": not_before,
                }
        if not ret["changes"]:
            ret["result"] = True
            ret["comment"] = "Key {} is already present.".format(name)
            return ret
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Key {} would be updated.".format(name)
            return ret
    # Create path (or update path with pending changes past this point).
    if __opts__["test"]:
        ret["comment"] = "Key {} would be created.".format(name)
        ret["result"] = None
        return ret
    key_kwargs = kwargs.copy()
    key_kwargs.update(connection_auth)
    key = __salt__["azurerm_keyvault_key.create_key"](
        name=name,
        vault_url=vault_url,
        key_type=key_type,
        tags=tags,
        key_operations=key_operations,
        enabled=enabled,
        hardware_protected=hardware_protected,
        not_before=not_before,
        expires_on=expires_on,
        size=size,
        curve=curve,
        **key_kwargs,
    )
    if action == "create":
        ret["changes"] = {"old": {}, "new": key}
    if "error" not in key:
        ret["result"] = True
        ret["comment"] = f"Key {name} has been {action}d."
        return ret
    ret["comment"] = "Failed to {} Key {}! ({})".format(action, name, key.get("error"))
    # On failure, never report changes that were not applied.
    if not ret["result"]:
        ret["changes"] = {}
    return ret
def absent(name, vault_url, connection_auth=None):
    """
    .. versionadded:: 2.1.0
    Ensure the specified key does not exist within the given key vault.
    :param name: The name of the key to delete.
    :param vault_url: The URL of the vault that the client will access.
    :param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    Example usage:
    .. code-block:: yaml
        Ensure key is absent:
            azurerm_keyvault_key.absent:
                - name: my_key
                - vault_url: my_vault
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    # Authentication details are mandatory for every remote call.
    if not isinstance(connection_auth, dict):
        ret["comment"] = (
            "Connection information must be specified via acct or connection_auth dictionary!"
        )
        return ret
    existing = __salt__["azurerm_keyvault_key.get_key"](
        name=name,
        vault_url=vault_url,
        azurerm_log_level="info",
        **connection_auth,
    )
    # Already absent -- nothing to delete.
    if "error" in existing:
        ret["result"] = True
        ret["comment"] = f"Key {name} was not found."
        return ret
    # Dry run: report what would happen without calling out to Azure.
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = f"Key {name} would be deleted."
        ret["changes"] = {"old": existing, "new": {}}
        return ret
    if __salt__["azurerm_keyvault_key.begin_delete_key"](
        name=name, vault_url=vault_url, **connection_auth
    ):
        ret["result"] = True
        ret["comment"] = f"Key {name} has been deleted."
        ret["changes"] = {"old": existing, "new": {}}
        return ret
    ret["comment"] = f"Failed to delete Key {name}!"
    return ret
import logging
import salt.utils.dictdiffer # pylint: disable=import-error
log = logging.getLogger(__name__)
def present(
    name,
    value,
    vault_url,
    content_type=None,
    enabled=None,
    expires_on=None,
    not_before=None,
    tags=None,
    version=None,
    connection_auth=None,
    **kwargs,
):
    """
    .. versionadded:: 2.1.0
    Ensure the specified secret exists within the given key vault. Requires secrets/set permission. Secret properties
    can be specified as keyword arguments.
    :param name: The name of the secret. Secret names can only contain alphanumeric characters and dashes.
    :param value: The value of the secret.
    :param vault_url: The URL of the vault that the client will access.
    :param content_type: An arbitrary string indicating the type of the secret.
    :param enabled: Whether the secret is enabled for use.
    :param expires_on: When the secret will expire, in UTC. This parameter should be a string representation
        of a Datetime object in ISO-8601 format.
    :param not_before: The time before which the secret cannot be used, in UTC. This parameter should be a
        string representation of a Datetime object in ISO-8601 format.
    :param tags: A dictionary of strings can be passed as tag metadata to the secret.
    :param version: The version of the secret. By default, a new version of the secret will not be created if the name
        is already in use UNLESS the value of the secret is changed. Secret properties will be updated on the latest
        version unless otherwise specified with this parameter.
    :param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    Example usage:
    .. code-block:: yaml
        Ensure secret exists:
            azurerm_keyvault_secret.present:
                - name: secretname
                - value: supersecret
                - content_type: "text/plain"
                - vault_url: "https://myvault.vault.azure.net/"
                - tags:
                    contact_name: Elmer Fudd Gantry
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    action = "create"
    # Authentication details are required for every remote call below.
    if not isinstance(connection_auth, dict):
        ret[
            "comment"
        ] = "Connection information must be specified via acct or connection_auth dictionary!"
        return ret
    secret = __salt__["azurerm_keyvault_secret.get_secret"](
        name=name,
        vault_url=vault_url,
        azurerm_log_level="info",
        **connection_auth,
    )
    if "error" not in secret:
        # The secret exists -- compute the delta between desired and actual state.
        action = "update"
        # Secret values are never written into state output -- only the fact that
        # the value changed is recorded, with redacted placeholders.
        if value != secret.get("value"):
            ret["changes"]["value"] = {
                "old": "REDACTED_OLD_VALUE",
                "new": "REDACTED_NEW_VALUE",
            }
        if tags:
            tag_changes = salt.utils.dictdiffer.deep_diff(
                secret.get("properties", {}).get("tags", {}) or {}, tags or {}
            )
            if tag_changes:
                ret["changes"]["tags"] = tag_changes
        if content_type:
            # Case-insensitive comparison; the stored content_type may be None.
            if (
                content_type.lower()
                != (secret.get("properties", {}).get("content_type", "") or "").lower()
            ):
                ret["changes"]["content_type"] = {
                    "old": secret.get("properties", {}).get("content_type"),
                    "new": content_type,
                }
        if enabled is not None:
            if enabled != secret.get("properties", {}).get("enabled"):
                ret["changes"]["enabled"] = {
                    "old": secret.get("properties", {}).get("enabled"),
                    "new": enabled,
                }
        if expires_on:
            if expires_on != secret.get("properties", {}).get("expires_on"):
                ret["changes"]["expires_on"] = {
                    "old": secret.get("properties", {}).get("expires_on"),
                    "new": expires_on,
                }
        if not_before:
            if not_before != secret.get("properties", {}).get("not_before"):
                ret["changes"]["not_before"] = {
                    "old": secret.get("properties", {}).get("not_before"),
                    "new": not_before,
                }
        if not ret["changes"]:
            ret["result"] = True
            ret["comment"] = "Secret {} is already present.".format(name)
            return ret
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Secret {} would be updated.".format(name)
            return ret
    # Create path (or update path with pending changes past this point).
    if __opts__["test"]:
        ret["comment"] = "Secret {} would be created.".format(name)
        ret["result"] = None
        return ret
    secret_kwargs = kwargs.copy()
    secret_kwargs.update(connection_auth)
    # A value change requires writing a new secret version (set_secret); changes to
    # properties alone are applied to the existing version instead.
    if action == "create" or (action == "update" and ret["changes"].get("value")):
        secret = __salt__["azurerm_keyvault_secret.set_secret"](
            name=name,
            value=value,
            vault_url=vault_url,
            content_type=content_type,
            enabled=enabled,
            expires_on=expires_on,
            not_before=not_before,
            tags=tags,
            **secret_kwargs,
        )
    else:
        secret = __salt__["azurerm_keyvault_secret.update_secret_properties"](
            name=name,
            vault_url=vault_url,
            version=version,
            content_type=content_type,
            enabled=enabled,
            expires_on=expires_on,
            not_before=not_before,
            tags=tags,
            **secret_kwargs,
        )
    if action == "create":
        ret["changes"] = {"old": {}, "new": secret}
        # Redact the returned secret value before it lands in state output.
        if ret["changes"]["new"].get("value"):
            ret["changes"]["new"]["value"] = "REDACTED"
    if "error" not in secret:
        ret["result"] = True
        ret["comment"] = f"Secret {name} has been {action}d."
        return ret
    ret["comment"] = "Failed to {} Secret {}! ({})".format(action, name, secret.get("error"))
    # On failure, never report changes that were not applied.
    if not ret["result"]:
        ret["changes"] = {}
    return ret
def absent(name, vault_url, purge=False, wait=False, connection_auth=None):
    """
    .. versionadded:: 2.1.0
    Ensure the specified secret does not exist within the given key vault.
    :param name: The name of the secret to delete.
    :param vault_url: The URL of the vault that the client will access.
    :param purge: Permanently deletes a deleted secret. Possible only in vaults with soft-delete enabled. Performs an
        irreversible deletion of the specified secret, without possibility for recovery. The operation is not available
        if the ``recovery_level`` does not specify 'Purgeable'.
    :param wait: When this method returns, Key Vault has begun deleting the secret. Deletion may take several seconds in
        a vault with soft-delete enabled. Setting this parameter to ``True`` enables you to wait for deletion to
        complete.
    :param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    Example usage:
    .. code-block:: yaml
        Ensure secret is absent:
            azurerm_keyvault_secret.absent:
                - name: secretname
                - vault_url: "https://myvault.vault.azure.net/"
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    action = "delete"
    # Authentication details are required for every remote call below.
    if not isinstance(connection_auth, dict):
        ret[
            "comment"
        ] = "Connection information must be specified via acct or connection_auth dictionary!"
        return ret
    secret = __salt__["azurerm_keyvault_secret.get_secret"](
        name=name,
        vault_url=vault_url,
        azurerm_log_level="info",
        **connection_auth,
    )
    if "error" in secret:
        # The live secret is gone; at most a soft-deleted copy could remain, so
        # the only action still possible is a purge.
        action = "purge"
        if purge:
            secret = __salt__["azurerm_keyvault_secret.get_deleted_secret"](
                name=name,
                vault_url=vault_url,
                azurerm_log_level="info",
                **connection_auth,
            )
        # Still no secret found (or purge was not requested): nothing to do.
        if "error" in secret:
            ret["result"] = True
            ret["comment"] = f"Secret {name} was not found."
            return ret
    # Dry run: report what would happen without calling out to Azure.
    if __opts__["test"]:
        ret["comment"] = f"Secret {name} would be {action}d."
        ret["result"] = None
        ret["changes"] = {
            "old": secret,
            "new": {},
        }
        return ret
    if action == "delete":
        deleted = __salt__["azurerm_keyvault_secret.delete_secret"](
            name=name, vault_url=vault_url, wait=wait, **connection_auth
        )
    if purge:
        # When requested, the purge runs after (or instead of) the soft delete.
        action = "purge"
        deleted = __salt__["azurerm_keyvault_secret.purge_deleted_secret"](
            name=name, vault_url=vault_url, **connection_auth
        )
    if deleted:
        ret["result"] = True
        ret["comment"] = f"Secret {name} has been {action}d."
        ret["changes"] = {"old": secret, "new": {}}
        return ret
    ret["comment"] = f"Failed to {action} Secret {name}!"
    return ret
import logging
import salt.utils.dictdiffer # pylint: disable=import-error
log = logging.getLogger(__name__)
def __virtual__():
    """
    Only make this state available if the azurerm_compute module is available.
    """
    if "azurerm_compute_availability_set.create_or_update" not in __salt__:
        return (False, "azurerm module could not be loaded")
    return True
def present(
    name,
    resource_group,
    tags=None,
    platform_update_domain_count=None,
    platform_fault_domain_count=None,
    virtual_machines=None,
    sku=None,
    connection_auth=None,
    **kwargs
):
    """
    .. versionadded:: 2.1.0
    Ensure an availability set exists.
    :param name:
        Name of the availability set.
    :param resource_group:
        The resource group assigned to the availability set.
    :param tags:
        A dictionary of strings can be passed as tag metadata to the availability set object.
    :param platform_update_domain_count:
        An optional parameter which indicates groups of virtual machines and underlying physical hardware that can be
        rebooted at the same time.
    :param platform_fault_domain_count:
        An optional parameter which defines the group of virtual machines that share a common power source and network
        switch.
    :param virtual_machines:
        A list of names of existing virtual machines to be included in the availability set.
    :param sku:
        The availability set SKU, which specifies whether the availability set is managed or not. Possible values are
        'Aligned' or 'Classic'. An 'Aligned' availability set is managed, 'Classic' is not.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    Example usage:
    .. code-block:: yaml
        Ensure availability set exists:
            azurerm_compute.availability_set_present:
                - name: aset1
                - resource_group: group1
                - platform_update_domain_count: 5
                - platform_fault_domain_count: 3
                - sku: aligned
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurerm_resource: Ensure resource group exists
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    # Authentication details are required for every remote call below.
    if not isinstance(connection_auth, dict):
        ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
        return ret
    # The API expects the SKU as an object with a capitalized name ('Aligned'/'Classic').
    if sku:
        sku = {"name": sku.capitalize()}
    aset = __salt__["azurerm_compute_availability_set.get"](
        name, resource_group, azurerm_log_level="info", **connection_auth
    )
    if "error" not in aset:
        # The availability set exists -- compute the delta between desired and actual state.
        tag_changes = salt.utils.dictdiffer.deep_diff(aset.get("tags", {}), tags or {})
        if tag_changes:
            ret["changes"]["tags"] = tag_changes
        if platform_update_domain_count and (
            int(platform_update_domain_count) != aset.get("platform_update_domain_count")
        ):
            ret["changes"]["platform_update_domain_count"] = {
                "old": aset.get("platform_update_domain_count"),
                "new": platform_update_domain_count,
            }
        if platform_fault_domain_count and (
            int(platform_fault_domain_count) != aset.get("platform_fault_domain_count")
        ):
            ret["changes"]["platform_fault_domain_count"] = {
                "old": aset.get("platform_fault_domain_count"),
                "new": platform_fault_domain_count,
            }
        if sku and (sku["name"] != aset.get("sku", {}).get("name")):
            ret["changes"]["sku"] = {"old": aset.get("sku"), "new": sku}
        if virtual_machines:
            if not isinstance(virtual_machines, list):
                ret["comment"] = "Virtual machines must be supplied as a list!"
                return ret
            aset_vms = aset.get("virtual_machines", [])
            # Compare by VM name only: the remote side stores full resource IDs, so
            # take the last path segment of each ID and compare case-insensitively.
            # BUGFIX: the filter previously tested ``"id" in aset_vms`` (membership of
            # the string "id" in the *list*), which is always False, so remote_vms was
            # always empty and a change was reported on every run.
            remote_vms = sorted(
                vm["id"].split("/")[-1].lower() for vm in aset_vms if "id" in vm
            )
            local_vms = sorted(vm.lower() for vm in virtual_machines or [])
            if local_vms != remote_vms:
                ret["changes"]["virtual_machines"] = {
                    "old": aset_vms,
                    "new": virtual_machines,
                }
        if not ret["changes"]:
            ret["result"] = True
            ret["comment"] = "Availability set {} is already present.".format(name)
            return ret
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Availability set {} would be updated.".format(name)
            return ret
    else:
        # No existing availability set: record the full desired state as the change.
        ret["changes"] = {
            "old": {},
            "new": {
                "name": name,
                "virtual_machines": virtual_machines,
                "platform_update_domain_count": platform_update_domain_count,
                "platform_fault_domain_count": platform_fault_domain_count,
                "sku": sku,
                "tags": tags,
            },
        }
    if __opts__["test"]:
        ret["comment"] = "Availability set {} would be created.".format(name)
        ret["result"] = None
        return ret
    aset_kwargs = kwargs.copy()
    aset_kwargs.update(connection_auth)
    aset = __salt__["azurerm_compute_availability_set.create_or_update"](
        name=name,
        resource_group=resource_group,
        virtual_machines=virtual_machines,
        platform_update_domain_count=platform_update_domain_count,
        platform_fault_domain_count=platform_fault_domain_count,
        sku=sku,
        tags=tags,
        **aset_kwargs
    )
    if "error" not in aset:
        ret["result"] = True
        ret["comment"] = "Availability set {} has been created.".format(name)
        return ret
    ret["comment"] = "Failed to create availability set {}! ({})".format(name, aset.get("error"))
    return ret
def absent(name, resource_group, connection_auth=None):
    """
    .. versionadded:: 2019.2.0

    Ensure an availability set does not exist in a resource group.

    :param name: Name of the availability set.

    :param resource_group: Name of the resource group containing the availability set.

    :param connection_auth: A dict with subscription and authentication parameters to be
        used in connecting to the Azure Resource Manager API.
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    if not isinstance(connection_auth, dict):
        ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
        return ret

    existing = __salt__["azurerm_compute_availability_set.get"](
        name, resource_group, azurerm_log_level="info", **connection_auth
    )

    if "error" in existing:
        # Nothing to remove -- the desired state already holds.
        ret["result"] = True
        ret["comment"] = f"Availability set {name} was not found."
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = f"Availability set {name} would be deleted."
        ret["changes"] = {"old": existing, "new": {}}
        return ret

    if __salt__["azurerm_compute_availability_set.delete"](
        name, resource_group, **connection_auth
    ):
        ret["result"] = True
        ret["comment"] = f"Availability set {name} has been deleted."
        ret["changes"] = {"old": existing, "new": {}}
        return ret

    ret["comment"] = f"Failed to delete availability set {name}!"
    return ret
return ret | /saltext.azurerm-4.0.1.tar.gz/saltext.azurerm-4.0.1/src/saltext/azurerm/states/azurerm_compute_availability_set.py | 0.74008 | 0.159119 | azurerm_compute_availability_set.py | pypi |
import logging
import salt.utils.dictdiffer # pylint: disable=import-error
__virtualname__ = "azurerm_dns"
log = logging.getLogger(__name__)
def __virtual__():
    """Load this state module only when the azurerm_dns execution module is present."""
    dns_available = "azurerm_dns.zones_list_by_resource_group" in __salt__
    if not dns_available:
        return (False, "azurerm_dns module could not be loaded")
    return __virtualname__
def zone_present(
    name,
    resource_group,
    etag=None,
    if_match=None,
    if_none_match=None,
    registration_virtual_networks=None,
    resolution_virtual_networks=None,
    tags=None,
    zone_type="Public",
    connection_auth=None,
    **kwargs
):
    """
    .. versionadded:: 3000
    Ensure a DNS zone exists.
    :param name:
        Name of the DNS zone (without a terminating dot).
    :param resource_group:
        The resource group assigned to the DNS zone.
    :param etag:
        The etag of the zone. `Etags <https://docs.microsoft.com/en-us/azure/dns/dns-zones-records#etags>`_ are used
        to handle concurrent changes to the same resource safely.
    :param if_match:
        The etag of the DNS zone. Omit this value to always overwrite the current zone. Specify the last-seen etag
        value to prevent accidentally overwriting any concurrent changes.
    :param if_none_match:
        Set to '*' to allow a new DNS zone to be created, but to prevent updating an existing zone. Other values will
        be ignored.
    :param registration_virtual_networks:
        A list of references to virtual networks that register hostnames in this DNS zone. This is only when zone_type
        is Private. (requires `azure-mgmt-dns <https://pypi.python.org/pypi/azure-mgmt-dns>`_ >= 2.0.0rc1)
    :param resolution_virtual_networks:
        A list of references to virtual networks that resolve records in this DNS zone. This is only when zone_type is
        Private. (requires `azure-mgmt-dns <https://pypi.python.org/pypi/azure-mgmt-dns>`_ >= 2.0.0rc1)
    :param tags:
        A dictionary of strings can be passed as tag metadata to the DNS zone object.
    :param zone_type:
        The type of this DNS zone (Public or Private). Possible values include: 'Public', 'Private'.
        Default value: 'Public'
        (requires `azure-mgmt-dns <https://pypi.python.org/pypi/azure-mgmt-dns>`_ >= 2.0.0rc1)
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    Example usage:
    .. code-block:: yaml
        Ensure DNS zone exists:
            azurerm_dns.zone_present:
                - name: contoso.com
                - resource_group: my_rg
                - zone_type: Private
                - registration_virtual_networks:
                  - /subscriptions/{{ sub }}/resourceGroups/my_rg/providers/Microsoft.Network/virtualNetworks/test_vnet
                - tags:
                    how_awesome: very
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    if not isinstance(connection_auth, dict):
        ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
        return ret
    # Fetch the current zone; a returned "error" key means the zone does not exist yet.
    zone = __salt__["azurerm_dns.zone_get"](
        name, resource_group, azurerm_log_level="info", **connection_auth
    )
    if "error" not in zone:
        # Zone exists: collect every difference between desired and remote state
        # into ret["changes"] before deciding whether an update is needed.
        tag_changes = salt.utils.dictdiffer.deep_diff(zone.get("tags", {}), tags or {})
        if tag_changes:
            ret["changes"]["tags"] = tag_changes
        # The zone_type parameter is only accessible in azure-mgmt-dns >=2.0.0rc1
        if zone.get("zone_type"):
            if zone.get("zone_type").lower() != zone_type.lower():
                ret["changes"]["zone_type"] = {
                    "old": zone["zone_type"],
                    "new": zone_type,
                }
            if zone_type.lower() == "private":
                # Virtual-network links only apply to Private zones; both lists are
                # compared case-insensitively by VNET resource ID.
                # The registration_virtual_networks parameter is only accessible in azure-mgmt-dns >=2.0.0rc1
                if registration_virtual_networks and not isinstance(
                    registration_virtual_networks, list
                ):
                    ret["comment"] = (
                        "registration_virtual_networks must be supplied as a list of"
                        " VNET ID paths!"
                    )
                    return ret
                reg_vnets = zone.get("registration_virtual_networks", [])
                remote_reg_vnets = sorted(vnet["id"].lower() for vnet in reg_vnets if "id" in vnet)
                local_reg_vnets = sorted(
                    vnet.lower() for vnet in registration_virtual_networks or []
                )
                if local_reg_vnets != remote_reg_vnets:
                    ret["changes"]["registration_virtual_networks"] = {
                        "old": remote_reg_vnets,
                        "new": local_reg_vnets,
                    }
                # The resolution_virtual_networks parameter is only accessible in azure-mgmt-dns >=2.0.0rc1
                if resolution_virtual_networks and not isinstance(
                    resolution_virtual_networks, list
                ):
                    ret["comment"] = (
                        "resolution_virtual_networks must be supplied as a list of VNET"
                        " ID paths!"
                    )
                    return ret
                res_vnets = zone.get("resolution_virtual_networks", [])
                remote_res_vnets = sorted(vnet["id"].lower() for vnet in res_vnets if "id" in vnet)
                local_res_vnets = sorted(vnet.lower() for vnet in resolution_virtual_networks or [])
                if local_res_vnets != remote_res_vnets:
                    ret["changes"]["resolution_virtual_networks"] = {
                        "old": remote_res_vnets,
                        "new": local_res_vnets,
                    }
        # No differences found: nothing to do.
        if not ret["changes"]:
            ret["result"] = True
            ret["comment"] = "DNS zone {} is already present.".format(name)
            return ret
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "DNS zone {} would be updated.".format(name)
            return ret
    else:
        # Zone does not exist: everything requested is reported as new.
        ret["changes"] = {
            "old": {},
            "new": {
                "name": name,
                "resource_group": resource_group,
                "etag": etag,
                "registration_virtual_networks": registration_virtual_networks,
                "resolution_virtual_networks": resolution_virtual_networks,
                "tags": tags,
                "zone_type": zone_type,
            },
        }
    if __opts__["test"]:
        ret["comment"] = "DNS zone {} would be created.".format(name)
        ret["result"] = None
        return ret
    # Forward any extra keyword arguments plus the auth parameters to the
    # execution module, which performs the actual create/update call.
    zone_kwargs = kwargs.copy()
    zone_kwargs.update(connection_auth)
    zone = __salt__["azurerm_dns.zone_create_or_update"](
        name=name,
        resource_group=resource_group,
        etag=etag,
        if_match=if_match,
        if_none_match=if_none_match,
        registration_virtual_networks=registration_virtual_networks,
        resolution_virtual_networks=resolution_virtual_networks,
        tags=tags,
        zone_type=zone_type,
        **zone_kwargs
    )
    if "error" not in zone:
        ret["result"] = True
        ret["comment"] = "DNS zone {} has been created.".format(name)
        return ret
    ret["comment"] = "Failed to create DNS zone {}! ({})".format(name, zone.get("error"))
    return ret
def zone_absent(name, resource_group, zone_type="Public", connection_auth=None):
    """
    .. versionadded:: 3000

    Ensure a DNS zone does not exist in the resource group.

    :param name: Name of the DNS zone.

    :param resource_group: The resource group assigned to the DNS zone.

    :param connection_auth: A dict with subscription and authentication parameters to be
        used in connecting to the Azure Resource Manager API.

    :param zone_type: The type of this DNS zone (Public or Private). Possible values
        include: 'Public', 'Private'. Default value: 'Public'
        (requires `azure-mgmt-dns <https://pypi.python.org/pypi/azure-mgmt-dns>`_ >= 2.0.0rc1)
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    if not isinstance(connection_auth, dict):
        ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
        return ret

    existing = __salt__["azurerm_dns.zone_get"](
        name, resource_group, azurerm_log_level="info", zone_type=zone_type, **connection_auth
    )

    if "error" in existing:
        # Already gone -- success with no changes.
        ret["result"] = True
        ret["comment"] = f"DNS zone {name} was not found."
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = f"DNS zone {name} would be deleted."
        ret["changes"] = {"old": existing, "new": {}}
        return ret

    if __salt__["azurerm_dns.zone_delete"](
        name, resource_group, zone_type=zone_type, **connection_auth
    ):
        ret["result"] = True
        ret["comment"] = f"DNS zone {name} has been deleted."
        ret["changes"] = {"old": existing, "new": {}}
        return ret

    ret["comment"] = f"Failed to delete DNS zone {name}!"
    return ret
def record_set_present(
name,
zone_name,
resource_group,
record_type,
zone_type="Public",
if_match=None,
if_none_match=None,
etag=None,
metadata=None,
ttl=None,
arecords=None,
aaaa_records=None,
mx_records=None,
ns_records=None,
ptr_records=None,
srv_records=None,
txt_records=None,
cname_record=None,
soa_record=None,
caa_records=None,
connection_auth=None,
**kwargs
):
"""
.. versionadded:: 3000
Ensure a record set exists in a DNS zone.
:param name:
The name of the record set, relative to the name of the zone.
:param zone_name:
Name of the DNS zone (without a terminating dot).
:param resource_group:
The resource group assigned to the DNS zone.
:param record_type:
The type of DNS record in this record set. Record sets of type SOA can be updated but not created
(they are created when the DNS zone is created). Possible values include: 'A', 'AAAA', 'CAA', 'CNAME',
'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT'
:param zone_type:
The type of this DNS zone (Public or Private). Possible values include: 'Public', 'Private'.
Default value: 'Public'
:param if_match:
The etag of the record set. Omit this value to always overwrite the current record set. Specify the last-seen
etag value to prevent accidentally overwritting any concurrent changes.
:param if_none_match:
Set to '*' to allow a new record set to be created, but to prevent updating an existing record set. Other values
will be ignored.
:param etag:
The etag of the record set. `Etags <https://docs.microsoft.com/en-us/azure/dns/dns-zones-records#etags>`__ are
used to handle concurrent changes to the same resource safely.
:param metadata:
A dictionary of strings can be passed as tag metadata to the record set object.
:param ttl:
The TTL (time-to-live) of the records in the record set. Required when specifying record information.
:param arecords:
The list of A records in the record set. View the
`Azure SDK documentation
<https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.arecord?view=azure-python>`__
to create a list of dictionaries representing the record objects.
:param aaaa_records:
The list of AAAA records in the record set. View the
`Azure SDK documentation
<https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.aaaarecord?view=azure-python>`__
to create a list of dictionaries representing the record objects.
:param mx_records:
The list of MX records in the record set. View the
`Azure SDK documentation
<https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.mxrecord?view=azure-python>`__
to create a list of dictionaries representing the record objects.
:param ns_records:
The list of NS records in the record set. View the
`Azure SDK documentation
<https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.nsrecord?view=azure-python>`__
to create a list of dictionaries representing the record objects.
:param ptr_records:
The list of PTR records in the record set. View the
`Azure SDK documentation
<https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.ptrrecord?view=azure-python>`__
to create a list of dictionaries representing the record objects.
:param srv_records:
The list of SRV records in the record set. View the
`Azure SDK documentation
<https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.srvrecord?view=azure-python>`__
to create a list of dictionaries representing the record objects.
:param txt_records:
The list of TXT records in the record set. View the
`Azure SDK documentation
<https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.txtrecord?view=azure-python>`__
to create a list of dictionaries representing the record objects.
:param cname_record:
The CNAME record in the record set. View the
`Azure SDK documentation
<https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.cnamerecord?view=azure-python>`__
to create a dictionary representing the record object.
:param soa_record:
The SOA record in the record set. View the
`Azure SDK documentation
<https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.soarecord?view=azure-python>`__
to create a dictionary representing the record object.
:param caa_records:
The list of CAA records in the record set. View the
`Azure SDK documentation
<https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.caarecord?view=azure-python>`__
to create a list of dictionaries representing the record objects.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure record set exists:
azurerm_dns.record_set_present:
- name: web
- zone_name: contoso.com
- resource_group: my_rg
- record_type: A
- ttl: 300
- arecords:
- ipv4_address: 10.0.0.1
- metadata:
how_awesome: very
contact_name: Elmer Fudd Gantry
- connection_auth: {{ profile }}
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
record_vars = [
"arecords",
"aaaa_records",
"mx_records",
"ns_records",
"ptr_records",
"srv_records",
"txt_records",
"cname_record",
"soa_record",
"caa_records",
]
if not isinstance(connection_auth, dict):
ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
return ret
rec_set = __salt__["azurerm_dns.record_set_get"](
name,
zone_name,
resource_group,
record_type,
zone_type,
azurerm_log_level="info",
**connection_auth
)
if "error" not in rec_set:
metadata_changes = salt.utils.dictdiffer.deep_diff(
rec_set.get("metadata", {}), metadata or {}
)
if metadata_changes:
ret["changes"]["metadata"] = metadata_changes
for record_str in record_vars:
# pylint: disable=eval-used
record = eval(record_str)
if record:
if not ttl:
ret["comment"] = "TTL is required when specifying record information!"
return ret
if not rec_set.get(record_str):
ret["changes"] = {"new": {record_str: record}}
continue
if record_str[-1] != "s":
if not isinstance(record, dict):
ret[
"comment"
] = "{} record information must be specified as a dictionary!".format(
record_str
)
return ret
for key, val in record.items():
if val != rec_set[record_str].get(key):
ret["changes"] = {"new": {record_str: record}}
elif record_str[-1] == "s":
if not isinstance(record, list):
ret["comment"] = (
"{} record information must be specified as a list of"
" dictionaries!".format(record_str)
)
return ret
local, remote = (sorted(config) for config in (record, rec_set[record_str]))
for val in local:
for key in val:
local_val = val[key]
remote_val = remote.get(key)
if isinstance(local_val, str):
local_val = local_val.lower()
if isinstance(remote_val, str):
remote_val = remote_val.lower()
if local_val != remote_val:
ret["changes"] = {"new": {record_str: record}}
if not ret["changes"]:
ret["result"] = True
ret["comment"] = "Record set {} is already present.".format(name)
return ret
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Record set {} would be updated.".format(name)
return ret
else:
ret["changes"] = {
"old": {},
"new": {
"name": name,
"zone_name": zone_name,
"resource_group": resource_group,
"record_type": record_type,
"etag": etag,
"metadata": metadata,
"ttl": ttl,
},
}
for record in record_vars:
# pylint: disable=eval-used
if eval(record):
# pylint: disable=eval-used
ret["changes"]["new"][record] = eval(record)
if __opts__["test"]:
ret["comment"] = "Record set {} would be created.".format(name)
ret["result"] = None
return ret
rec_set_kwargs = kwargs.copy()
rec_set_kwargs.update(connection_auth)
rec_set = __salt__["azurerm_dns.record_set_create_or_update"](
name=name,
zone_name=zone_name,
resource_group=resource_group,
record_type=record_type,
zone_type=zone_type,
if_match=if_match,
if_none_match=if_none_match,
etag=etag,
ttl=ttl,
metadata=metadata,
arecords=arecords,
aaaa_records=aaaa_records,
mx_records=mx_records,
ns_records=ns_records,
ptr_records=ptr_records,
srv_records=srv_records,
txt_records=txt_records,
cname_record=cname_record,
soa_record=soa_record,
caa_records=caa_records,
**rec_set_kwargs
)
if "error" not in rec_set:
ret["result"] = True
ret["comment"] = "Record set {} has been created.".format(name)
return ret
ret["comment"] = "Failed to create record set {}! ({})".format(name, rec_set.get("error"))
return ret
def record_set_absent(
    name, zone_name, resource_group, record_type, zone_type="Public", connection_auth=None
):
    """
    .. versionadded:: 3000

    Ensure a record set does not exist in the DNS zone.

    :param name: Name of the record set.

    :param zone_name: Name of the DNS zone.

    :param resource_group: The resource group assigned to the DNS zone.

    :param record_type: The type of DNS record in this record set. Record sets of type
        SOA can be updated but not created (they are created when the DNS zone is
        created). Possible values include: 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS',
        'PTR', 'SOA', 'SRV', 'TXT'

    :param zone_type: The type of DNS zone (the default is set to "Public")

    :param connection_auth: A dict with subscription and authentication parameters to be
        used in connecting to the Azure Resource Manager API.
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    if not isinstance(connection_auth, dict):
        ret["comment"] = "Connection information must be specified via connection_auth dictionary!"
        return ret

    existing = __salt__["azurerm_dns.record_set_get"](
        name,
        zone_name,
        resource_group,
        record_type=record_type,
        azurerm_log_level="info",
        zone_type=zone_type,
        **connection_auth
    )

    if "error" in existing:
        # Already gone -- success with no changes.
        ret["result"] = True
        ret["comment"] = f"Record set {name} was not found in zone {zone_name}."
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = f"Record set {name} would be deleted."
        ret["changes"] = {"old": existing, "new": {}}
        return ret

    if __salt__["azurerm_dns.record_set_delete"](
        name,
        zone_name,
        resource_group,
        record_type=record_type,
        zone_type=zone_type,
        **connection_auth
    ):
        ret["result"] = True
        ret["comment"] = f"Record set {name} has been deleted."
        ret["changes"] = {"old": existing, "new": {}}
        return ret

    ret["comment"] = f"Failed to delete record set {name}!"
    return ret
import logging
__virtualname__ = "azurerm_compute"
log = logging.getLogger(__name__)
def __virtual__():
    """Load this state module only when the azurerm_compute execution module is present."""
    if "azurerm_compute.availability_set_create_or_update" not in __salt__:
        return (False, "azurerm module could not be loaded")
    return __virtualname__
def availability_set_present(
    name,
    resource_group,
    tags=None,
    platform_update_domain_count=None,
    platform_fault_domain_count=None,
    virtual_machines=None,
    sku=None,
    connection_auth=None,
    **kwargs
):
    """
    .. versionadded:: 2019.2.0

    **WARNING: This function has been moved to another file (azurerm_compute_availability_set.py)
    and will be deprecated in the future.**

    Ensure an availability set exists. This is a thin delegation wrapper: all
    arguments are forwarded unchanged to
    ``azurerm_compute_availability_set.present``.

    :param name: Name of the availability set.

    :param resource_group: The resource group assigned to the availability set.

    :param tags: A dictionary of strings can be passed as tag metadata to the
        availability set object.

    :param platform_update_domain_count: An optional parameter which indicates groups of
        virtual machines and underlying physical hardware that can be rebooted at the
        same time.

    :param platform_fault_domain_count: An optional parameter which defines the group of
        virtual machines that share a common power source and network switch.

    :param virtual_machines: A list of names of existing virtual machines to be included
        in the availability set.

    :param sku: The availability set SKU, which specifies whether the availability set is
        managed or not. Possible values are 'Aligned' or 'Classic'. An 'Aligned'
        availability set is managed, 'Classic' is not.

    :param connection_auth: A dict with subscription and authentication parameters to be
        used in connecting to the Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure availability set exists:
            azurerm_compute.availability_set_present:
                - name: aset1
                - resource_group: group1
                - platform_update_domain_count: 5
                - platform_fault_domain_count: 3
                - sku: aligned
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurerm_resource: Ensure resource group exists
    """
    present_state = __states__["azurerm_compute_availability_set.present"]
    return present_state(
        name=name,
        resource_group=resource_group,
        tags=tags,
        platform_update_domain_count=platform_update_domain_count,
        platform_fault_domain_count=platform_fault_domain_count,
        virtual_machines=virtual_machines,
        sku=sku,
        connection_auth=connection_auth,
        **kwargs,
    )
def availability_set_absent(name, resource_group, connection_auth=None):
    """
    .. versionadded:: 2019.2.0

    **WARNING: This function has been moved to another file (azurerm_compute_availability_set.py)
    and will be deprecated in the future.**

    Ensure an availability set does not exist in a resource group. This is a
    thin delegation wrapper around ``azurerm_compute_availability_set.absent``.

    :param name: Name of the availability set.

    :param resource_group: Name of the resource group containing the availability set.

    :param connection_auth: A dict with subscription and authentication parameters to be
        used in connecting to the Azure Resource Manager API.
    """
    absent_state = __states__["azurerm_compute_availability_set.absent"]
    return absent_state(
        name=name,
        resource_group=resource_group,
        connection_auth=connection_auth,
    )
import logging
import salt.exceptions
import saltext.bitwarden.utils.bitwarden_mod as utils
log = logging.getLogger(__name__)
__func_alias__ = {"set_": "set"}
__virtualname__ = "bitwarden"
def __virtual__():
    """Always load; return the module's virtual name.

    To force the module not to load, return a tuple instead, e.g.
    ``(False, "The bitwarden sdb module is not implemented yet")``.
    """
    return __virtualname__
def _get_config(profile=None): # pylint: disable=C0116
if profile.get("driver") != "bitwarden":
log.error("The specified profile is not a bitwarden profile")
return {}
if not profile:
log.debug("The config is not set")
return {}
return profile
def delete(key, profile=None):
    """
    Delete a value from a Bitwarden vault item.

    Only individual values inside an existing vault item can be targeted
    through sdb; deleting whole items is handled by the execution, runner, or
    state modules instead.
    """
    raise salt.exceptions.NotImplemented()
def set_(*args, **kwargs):
    """
    Set a value in a Bitwarden vault item.

    Only individual values inside an existing vault item can be targeted
    through sdb; creating new items is handled by the execution, runner, or
    state modules instead.
    """
    raise salt.exceptions.NotImplemented()
def get(key, profile=None):
    """
    Get a value from a Bitwarden vault item.

    This function only gets individual values within a vault item, and will not
    retrieve an entire item. To retrieve entire items, please use one of the
    execution, pillar, runner, or state modules instead.

    The sdb key takes one of two forms::

        by-uuid/<item-uuid>/<username|password|totp|notes>
        by-uuid/<item-uuid>/fields/<field-name>

    Returns the requested value, or None when the path is invalid or the value
    cannot be found (errors are logged rather than raised).
    """
    opts = _get_config(profile=profile)
    args = key.split("/")
    if len(args) < 3 or len(args) > 4:
        log.error("Invalid number of arguments in sdb path.")
        return None
    if args[0] != "by-uuid":
        log.error('Supplied locator method "%s" is invalid.', args[0])
        return None
    item_id, obj = args[1], args[2]
    if obj in ("username", "password", "totp"):
        # These three values live under the item's "login" sub-dictionary.
        item = utils.get_item(opts=opts, item_id=item_id)
        return item["login"][obj]
    if obj == "notes":
        item = utils.get_item(opts=opts, item_id=item_id)
        return item["notes"]
    if obj == "fields":
        # Fields require a fourth path component naming the field. Previously a
        # missing field name fell through to the generic "not one of ..." error,
        # which was misleading.
        if len(args) != 4:
            log.error("A field name must be supplied when retrieving fields.")
            return None
        field_name = args[3]
        item = utils.get_item(opts=opts, item_id=item_id)
        value = None
        for field in item["fields"]:
            if field["name"] == field_name:
                if value is not None:
                    # Refuse to guess between identically named custom fields.
                    log.error('Supplied field name "%s" is not unique', field_name)
                    return None
                value = field["value"]
        return value
    log.error(
        'Supplied object "%s" is not one of: username, password, totp, notes.', obj
    )
    return None
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in the salt-ext-heist project and our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at oss-coc@vmware.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
| /saltext.heist-1.0.0b0.tar.gz/saltext.heist-1.0.0b0/CODE_OF_CONDUCT.md | 0.572006 | 0.665988 | CODE_OF_CONDUCT.md | pypi |
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in Salt
Extension Modules for Azure Resource Manager project and our community a
harassment-free experience for everyone, regardless of age, body size, visible
or invisible disability, ethnicity, sex characteristics, gender identity and
expression, level of experience, education, socio-economic status, nationality,
personal appearance, race, religion, or sexual identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at devops@eitr.tech.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
| /saltext.proxmox-1.0.0.tar.gz/saltext.proxmox-1.0.0/CODE-OF-CONDUCT.md | 0.62223 | 0.653673 | CODE-OF-CONDUCT.md | pypi |
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in the
salt-describe project and our community a harassment-free experience for
everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at oss-coc@vmware.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
| /saltext.salt-describe-1.0.0.tar.gz/saltext.salt-describe-1.0.0/CODE_OF_CONDUCT.md | 0.582491 | 0.700229 | CODE_OF_CONDUCT.md | pypi |
import logging
import sys
import yaml
from saltext.salt_describe.utils.init import generate_files
from saltext.salt_describe.utils.init import parse_salt_ret
from saltext.salt_describe.utils.init import ret_info
from saltext.salt_describe.utils.salt_describe import generate_pillars
__virtualname__ = "describe"
log = logging.getLogger(__name__)
def __virtual__():
    """
    Expose this runner module under the ``describe`` virtual name.
    """
    return __virtualname__
def user(
    tgt,
    require_groups=False,
    minimum_uid=None,
    maximum_uid=None,
    minimum_gid=None,
    maximum_gid=None,
    tgt_type="glob",
    config_system="salt",
):
    """
    Read users on the minions and build a state file to manage the users,
    plus a pillar file holding each user's password hash.

    tgt
        Target expression selecting the minions to describe.

    require_groups
        When ``True``, also generate group states first via ``describe.group``.

    minimum_uid / maximum_uid
        Only describe users whose uid is strictly inside this range.

    minimum_gid / maximum_gid
        Passed through to ``describe.group`` when ``require_groups`` is set.

    tgt_type
        Salt targeting type (default ``glob``).

    config_system
        Configuration system the generated files are written for (default ``salt``).

    CLI Example:

    .. code-block:: bash

        salt-run describe.user minion-tgt
    """
    mod_name = sys._getframe().f_code.co_name
    log.info("Attempting to generate SLS file for %s", mod_name)

    if require_groups is True:
        __salt__["describe.group"](
            tgt=tgt,
            include_members=False,
            minimum_gid=minimum_gid,
            maximum_gid=maximum_gid,
            tgt_type=tgt_type,
        )

    users = __salt__["salt.execute"](
        tgt,
        "user.getent",
        tgt_type=tgt_type,
    )

    sls_files = []
    if not parse_salt_ret(ret=users, tgt=tgt):
        return ret_info(sls_files, mod=mod_name)

    for minion in list(users.keys()):
        # Reset per minion: previously the accumulated state/pillar data leaked
        # from one minion into the next, and re-binding ``pillars`` to the
        # yaml.dump() string broke the dict update on the second iteration.
        state_contents = {}
        pillars = {"users": {}}
        for user_info in users[minion]:
            if minimum_uid and int(user_info["uid"]) <= minimum_uid:
                continue
            if maximum_uid and int(user_info["uid"]) >= maximum_uid:
                continue
            shadow = __salt__["salt.execute"](
                minion, "shadow.info", arg=[user_info["name"]], tgt_type="glob"
            )[minion]
            homeexists = __salt__["salt.execute"](
                minion, "file.directory_exists", arg=[user_info["home"]], tgt_type="glob"
            )[minion]
            username = user_info["name"]
            payload = [
                {"name": username},
                {"uid": user_info["uid"]},
                {"gid": user_info["gid"]},
                {"allow_uid_change": True},
                {"allow_gid_change": True},
                {"home": user_info["home"]},
                {"shell": user_info["shell"]},
                {"groups": user_info["groups"]},
                {"password": f'{{{{ salt["pillar.get"]("users:{username}","*") }}}}'},
                {"enforce_password": True},
                {"date": shadow["lstchg"]},
                {"mindays": shadow["min"]},
                {"maxdays": shadow["max"]},
                {"inactdays": shadow["inact"]},
                {"expire": shadow["expire"]},
            ]
            # Only manage home creation when the directory already exists on the minion.
            payload.append({"createhome": bool(homeexists)})
            # GECOS fields are optional; include only those that are set.
            for gecos in ("fullname", "homephone", "other", "roomnumber", "workphone"):
                if user_info[gecos]:
                    payload.append({gecos: user_info[gecos]})
            state_contents[f"user-{username}"] = {"user.present": payload}
            # Store the real password hash in pillar; "*" means no password set.
            passwd = shadow["passwd"]
            if passwd != "*":
                pillars["users"].update({username: f"{passwd}"})
        state = yaml.dump(state_contents)
        pillar_data = yaml.dump(pillars)
        sls_files.append(
            generate_files(__opts__, minion, state, sls_name="users", config_system=config_system)
        )
        generate_pillars(__opts__, minion, pillar_data, sls_name="users")
    return ret_info(sls_files, mod=mod_name)
def group(
    tgt,
    include_members=False,
    minimum_gid=None,
    maximum_gid=None,
    tgt_type="glob",
    config_system="salt",
):
    """
    Read groups on the minions and build a state file to manage the groups.

    tgt
        Target expression selecting the minions to describe.

    include_members
        When ``True``, include each group's member list in the state.

    minimum_gid / maximum_gid
        Only describe groups whose gid is strictly inside this range.

    tgt_type
        Salt targeting type (default ``glob``).

    config_system
        Configuration system the generated files are written for (default ``salt``).

    CLI Example:

    .. code-block:: bash

        salt-run describe.group minion-tgt
    """
    mod_name = sys._getframe().f_code.co_name
    groups = __salt__["salt.execute"](
        tgt,
        "group.getent",
        tgt_type=tgt_type,
    )
    # Initialize before the early return below; previously this raised
    # NameError ("sls_files" referenced before assignment) on a failed run.
    sls_files = []
    if not parse_salt_ret(ret=groups, tgt=tgt):
        return ret_info(sls_files, mod=mod_name)

    for minion in list(groups.keys()):
        # Reset per minion so one minion's groups do not leak into another's SLS.
        state_contents = {}
        for grp in groups[minion]:
            if minimum_gid and int(grp["gid"]) <= minimum_gid:
                continue
            # Honor maximum_gid, which was previously accepted but ignored.
            if maximum_gid and int(grp["gid"]) >= maximum_gid:
                continue
            groupname = grp["name"]
            payload = [{"name": groupname}, {"gid": grp["gid"]}]
            if include_members is True:
                payload.append({"members": grp["members"]})
            state_contents[f"group-{groupname}"] = {"group.present": payload}
        state = yaml.dump(state_contents)
        sls_files.append(
            generate_files(__opts__, minion, state, sls_name="groups", config_system=config_system)
        )
    return ret_info(sls_files, mod=mod_name)
import logging
import pathlib
from inspect import getargspec
from inspect import Parameter
from inspect import signature
import salt.daemons.masterapi # pylint: disable=import-error
import salt.utils.files # pylint: disable=import-error
import yaml
from saltext.salt_describe.utils.init import ret_info
__virtualname__ = "describe"
__func_alias__ = {"all_": "all", "top_": "top"}
log = logging.getLogger(__name__)
def __virtual__():
    """
    Expose this runner module under the ``describe`` virtual name.
    """
    return __virtualname__
def _exclude_from_all(func):
"""
Decorator to exclude functions from all function
"""
func.__all_excluded__ = True
return func
def _get_all_single_describe_methods():
    """
    Collect every loaded ``describe.*`` runner that should participate in
    ``describe.all``, keyed by its short name (the part after ``describe.``).

    Functions decorated with ``_exclude_from_all`` are skipped.
    """
    collected = {}
    for full_name, loaded_func in __salt__.items():
        if not full_name.startswith("describe"):
            continue
        # Skip runners explicitly opted out via the decorator attribute.
        if getattr(loaded_func, "__all_excluded__", False):
            continue
        collected[full_name.replace("describe.", "")] = loaded_func
    return collected
@_exclude_from_all
def all_(tgt, top=True, include=None, exclude=None, config_system="salt", **kwargs):
    """
    Run all describe methods against target.

    Only one of ``exclude`` or ``include`` may be given to specify which
    functions to run; each can be a string or a python list.

    CLI Example:

    .. code-block:: bash

        salt-run describe.all minion-tgt exclude='["file", "user"]'

    You can supply args and kwargs to functions that require them as well.
    These are passed as explicit kwargs.

    CLI Example:

    .. code-block:: bash

        salt-run describe.all minion-tgt include='["file", "pip"]' paths='["/tmp/testfile", "/tmp/testfile2"]'

    If two functions take an arg or kwarg of the same name, you can differentiate
    them by prefixing the argument name with the function name.

    CLI Example:

    .. code-block:: bash

        salt-run describe.all minion-tgt include='["file", "pip"]' file_paths='["/tmp/testfile", "/tmp/testfile2"]'
    """
    if exclude and include:
        log.error("Only one of exclude and include can be provided")
        return False

    all_methods = _get_all_single_describe_methods()

    # Sanitize the include and exclude to the extremes if none are given
    if exclude is None:
        exclude = set()
    elif isinstance(exclude, str):
        exclude = {exclude}
    elif isinstance(exclude, (list, tuple)):
        exclude = set(exclude)

    if include is None:
        include = all_methods.keys()
    elif isinstance(include, str):
        include = {include}
    elif isinstance(include, (list, tuple)):
        include = set(include)

    # The set difference gives us all the allowed methods here
    allowed_method_names = include - exclude
    allowed_methods = {
        name: func for name, func in all_methods.items() if name in allowed_method_names
    }
    log.debug("Allowed methods in all: %s", allowed_methods)

    def _get_arg_for_func(p_name, func_name, kwargs):
        """
        Return the argument value and whether or not it failed to find
        """
        # Allow more specific arg to take precendence
        spec_name = f"{func_name}_{p_name}"
        if spec_name in kwargs:
            return kwargs.get(spec_name), False
        if p_name in kwargs:
            return kwargs.get(p_name), False
        return None, True

    kwargs["tgt"] = tgt
    kwargs["config_system"] = config_system
    sls_files = []
    for name, func in allowed_methods.items():
        sig = signature(func)
        call_args = []
        call_kwargs = {}
        # Required positional parameters (those without a default), derived
        # from the signature instead of the deprecated ``inspect.getargspec``
        # (removed in Python 3.11), whose ``args[: -len(defaults)]`` slice
        # also crashed (len(None)) or emptied the list when a function had
        # no defaults.
        args = [
            arg_name
            for arg_name, arg_obj in sig.parameters.items()
            if arg_obj.kind in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD)
            and arg_obj.default is Parameter.empty
        ]
        misg_req_arg = False
        for p_name, p_obj in sig.parameters.items():
            p_value, failed = _get_arg_for_func(p_name, name, kwargs)
            # Take care of required args and kwargs (parentheses match the
            # original operator precedence: and binds tighter than or).
            if (failed and p_obj.kind == Parameter.POSITIONAL_ONLY) or (
                p_name in args and not p_value
            ):
                log.error("Missing positional arg %s for describe.%s", p_name, name)
                misg_req_arg = True
                # we can still continue trying to generate other SLS files even if a
                # required arg is not available for a specific module
                break
            if failed and p_obj.kind == Parameter.KEYWORD_ONLY and p_obj.default == Parameter.empty:
                log.error("Missing required keyword arg %s for describe.%s", p_name, name)
                misg_req_arg = True
                # we can still continue trying to generate other SLS files even if a
                # required arg is not available for a specific module
                break

            # We can fail to find some args
            if failed:
                continue

            if (
                p_obj.kind in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD)
                and p_name in args
            ):
                call_args.append(p_value)
            elif p_obj.kind == Parameter.VAR_POSITIONAL:
                if not isinstance(p_value, list):
                    log.error(f"{p_name} must be a Python list")
                    return False
                call_args.extend(p_value)
            elif p_obj.kind == Parameter.KEYWORD_ONLY:
                call_kwargs[p_name] = p_value
            elif p_obj.kind == Parameter.VAR_KEYWORD:
                if not isinstance(p_value, dict):
                    log.error(f"{p_name} must be a Python dictionary")
                    return False
                call_kwargs.update(p_value)
            elif p_name not in args:
                call_kwargs[p_name] = p_value

        if misg_req_arg:
            continue

        try:
            bound_sig = sig.bind(*call_args, **call_kwargs)
        except TypeError:
            log.error(f"Invalid args, kwargs for signature of {name}: {call_args}, {call_kwargs}")
            return False

        log.debug(
            "Running describe.%s in all -- tgt: %s\targs: %s\tkwargs: %s",
            name,
            tgt,
            bound_sig.args,
            bound_sig.kwargs,
        )
        try:
            # This follows the unwritten standard that the minion target must be the first argument
            log.debug(f"Generating SLS for {name} module")
            ret = __salt__[f"describe.{name}"](*bound_sig.args, **bound_sig.kwargs)
            if isinstance(ret, dict):
                sls_files = sls_files + list(ret.values())[0]
            else:
                log.error(f"Could not generate the SLS file for {name}")
        except TypeError as err:
            log.error(err.args[0])

    # generate the top file
    if top:
        __salt__["describe.top"](tgt)
    return ret_info(sls_files)
@_exclude_from_all
def top_(tgt, tgt_type="glob", env="base"):
    """
    Add the generated states to top.sls

    tgt
        Target expression selecting the minions whose SLS files are added.

    tgt_type
        Salt targeting type (default ``glob``).

    env
        Saltenv section of the top file to update (default ``base``).

    CLI Example:

    .. code-block:: bash

        salt-run describe.top minion-tgt
    """
    # Gather minions based on tgt and tgt_type arguments
    masterapi = salt.daemons.masterapi.RemoteFuncs(__opts__)
    minions = masterapi.local.gather_minions(tgt, tgt_type)

    state_file_root = pathlib.Path(__salt__["config.get"]("file_roots:base")[0])

    top_file = state_file_root / "top.sls"
    if not top_file.is_file():
        top_file.touch()

    top_file_dict = {}
    with salt.utils.files.fopen(top_file, "r") as fp_:
        # yaml.safe_load returns None for an empty top file; keep {} then.
        top_data = yaml.safe_load(fp_.read())
        if top_data:
            top_file_dict = top_data

    if env not in top_file_dict:
        top_file_dict[env] = {}

    for minion in minions:
        sls_files = []
        add_top = []
        minion_file_root = state_file_root / minion
        if not minion_file_root.exists():
            log.error(f"The file root path {minion_file_root} does not exist")
            return False
        # Every generated <minion>/<name>.sls becomes a "<minion>.<name>" entry.
        for file in minion_file_root.iterdir():
            if file.suffix == ".sls" and file.stem != "init":
                sls_files.append(minion + "." + file.stem)

        # Check to see if the SLS file already exists in top file
        for sls in sls_files:
            if sls not in top_file_dict[env].get(minion, []):
                add_top.append(sls)

        if minion not in top_file_dict[env]:
            top_file_dict[env][minion] = add_top
        else:
            for _top_file in add_top:
                top_file_dict[env][minion].append(_top_file)

    # NOTE(review): only the last minion's add_top decides whether the file is
    # written back — confirm this is intended for multi-minion targets.
    if add_top:
        with salt.utils.files.fopen(top_file, "w") as fp_:
            fp_.write(yaml.dump(top_file_dict))
    else:
        return {"Top file was not changed, already contains correct SLS files": str(top_file)}
    return ret_info(str(top_file), mod="top file")
@_exclude_from_all
def pillar_top(tgt, tgt_type="glob", env="base"):
    """
    Add the generated pillars to the pillar top.sls

    tgt
        Target expression selecting the minions whose pillar SLS files are added.

    tgt_type
        Salt targeting type (default ``glob``).

    env
        Saltenv section of the pillar top file to update (default ``base``).

    CLI Example:

    .. code-block:: bash

        salt-run describe.pillar_top minion-tgt
    """
    # Gather minions based on tgt and tgt_type arguments
    masterapi = salt.daemons.masterapi.RemoteFuncs(__opts__)
    minions = masterapi.local.gather_minions(tgt, tgt_type)

    pillar_file_root = pathlib.Path(__salt__["config.get"]("pillar_roots:base")[0])

    top_file = pillar_file_root / "top.sls"
    if not top_file.is_file():
        top_file.touch()

    top_file_dict = {}
    with salt.utils.files.fopen(top_file, "r") as fp_:
        # yaml.safe_load returns None for an empty top file, which previously
        # crashed the membership test below; fall back to an empty dict.
        top_data = yaml.safe_load(fp_.read())
        if top_data:
            top_file_dict = top_data

    if env not in top_file_dict:
        top_file_dict[env] = {}

    for minion in minions:
        add_top = []
        minion_pillar_root = pillar_file_root / minion
        for file in minion_pillar_root.iterdir():
            if file.suffix == ".sls" and file.stem != "init":
                entry = minion + "." + file.stem
                # Skip entries already present so repeated runs stay idempotent
                # (matches the behavior of describe.top).
                if entry not in top_file_dict[env].get(minion, []):
                    add_top.append(entry)

        if minion not in top_file_dict[env]:
            top_file_dict[env][minion] = add_top
        else:
            # extend, not append: append() nested the whole list as a single
            # element, producing an invalid top file.
            top_file_dict[env][minion].extend(add_top)

    with salt.utils.files.fopen(top_file, "w") as fp_:
        fp_.write(yaml.dump(top_file_dict))
    return True
import logging
from saltext.vmware.modules import vmc_sddc
from saltext.vmware.utils import vmc_constants
from saltext.vmware.utils import vmc_request
from saltext.vmware.utils import vmc_templates
log = logging.getLogger(__name__)
__virtualname__ = "vmc_sddc_host"
def __virtual__():
    """
    Expose this execution module under the ``vmc_sddc_host`` virtual name.
    """
    return __virtualname__
def manage(
    hostname,
    refresh_key,
    authorization_host,
    org_id,
    sddc_id,
    num_hosts,
    availability_zone=None,
    cluster_id=None,
    esxs=None,
    strict_placement=False,
    action=None,
    verify_ssl=True,
    cert=None,
):
    """
    Add or remove ESX hosts for a given SDDC.

    Please refer the `VMC Create ESXs documentation <https://developer.vmware.com/docs/vmc/latest/vmc/api/orgs/org/sddcs/sddc/esxs/post/>`_ to get insight of functionality and input parameters

    CLI Example:

    .. code-block:: bash

        salt <minion-key-id> vmc_sddc_host.manage hostname=vmc.vmware.com ...

    hostname
        The host name of VMC

    refresh_key
        API Token of the user which is used to get the Access Token required for VMC operations

    authorization_host
        Hostname of the Cloud Services Platform (CSP)

    org_id
        The Id of organization to which the SDDC belongs to

    sddc_id
        The Id of SDDC for which the host would be added/removed

    num_hosts
        (Required) Count of hosts that would be added/removed for the given SDDC

    availability_zone
        (Optional) Availability zone where the hosts should be provisioned
        (can be specified only for privileged host operations)

    cluster_id
        (Optional) Cluster id if the esxs operation has to target a specific cluster

    esxs
        (Optional) List of ESX IDs (UUID strings) to remove

    strict_placement
        (Optional) Fail the operation unless the host can be strictly placed
        in a placement group

    action
        (Optional) One of ``add`` (default behaviour), ``remove``,
        ``force-remove``, ``addToAll``, ``removeFromAll``,
        ``attach-diskgroup`` or ``detach-diskgroup``. ``force-remove``,
        ``addToAll``, ``removeFromAll`` and the diskgroup actions are
        privileged operations.

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file to connect to VMC Cloud Console.
        The certificate can be retrieved from browser.
    """
    log.info("Managing host for the SDDC %s", sddc_id)
    api_url = "{base_url}vmc/api/orgs/{org_id}/sddcs/{sddc_id}/esxs".format(
        base_url=vmc_request.set_base_url(hostname), org_id=org_id, sddc_id=sddc_id
    )

    # Drop unset optional fields so only caller-supplied values reach the API.
    body_fields = {
        "num_hosts": num_hosts,
        "availability_zone": availability_zone,
        "cluster_id": cluster_id,
        "esxs": esxs,
        "strict_placement": strict_placement,
    }
    body = vmc_request._filter_kwargs(allowed_kwargs=body_fields.keys(), **body_fields)
    query_params = vmc_request._filter_kwargs(allowed_kwargs=["action"], action=action)
    payload = vmc_request.create_payload_for_request(vmc_templates.manage_sddc_host, body)
    return vmc_request.call_api(
        method=vmc_constants.POST_REQUEST_METHOD,
        url=api_url,
        refresh_key=refresh_key,
        authorization_host=authorization_host,
        description="vmc_sddc_host.manage",
        data=payload,
        params=query_params,
        verify_ssl=verify_ssl,
        cert=cert,
    )
def get(hostname, refresh_key, authorization_host, org_id, sddc_id, verify_ssl=True, cert=None):
    """
    Retrieves ESX hosts for the given SDDC

    Please refer the `VMC Get SDDC documentation <https://developer.vmware.com/docs/vmc/latest/vmc/api/orgs/org/sddcs/sddc/get/>`_ to get insight of functionality and input parameters

    CLI Example:

    .. code-block:: bash

        salt <minion-key-id> vmc_sddc_host.get hostname=vmc.vmware.com ...

    hostname
        The host name of VMC

    refresh_key
        API Token of the user which is used to get the Access Token required for VMC operations

    authorization_host
        Hostname of the Cloud Services Platform (CSP)

    org_id
        The Id of organization to which the SDDC belongs to

    sddc_id
        The Id of SDDC for which the hosts would be retrieved

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file to connect to VMC Cloud Console.
        The certificate can be retrieved from browser.
    """
    # Fixed log call: the old format string mixed "{}" with "%s"
    # ("Retrieving hosts for SDDC {} %s"), leaving a literal "{}" in output.
    log.info("Retrieving hosts for SDDC %s", sddc_id)
    sddc_detail = vmc_sddc.get_by_id(
        hostname=hostname,
        refresh_key=refresh_key,
        authorization_host=authorization_host,
        org_id=org_id,
        sddc_id=sddc_id,
        verify_ssl=verify_ssl,
        cert=cert,
    )
    # Propagate API errors unchanged so callers see the original failure.
    if "error" in sddc_detail:
        return sddc_detail
    esx_hosts_details = sddc_detail["resource_config"]["esx_hosts"]
    return {"description": "vmc_sddc_host.get", "esx_hosts_details": esx_hosts_details}
import logging
from saltext.vmware.utils import common
from saltext.vmware.utils import nsxt_request
log = logging.getLogger(__name__)
__virtual_name__ = "nsxt_ip_pools"
IP_POOLS_BASE_URL = "https://{}/api/v1/pools/ip-pools"
def __virtual__():
    """
    Expose this execution module under the ``nsxt_ip_pools`` virtual name.

    NOTE(review): this file spells the module-level variable
    ``__virtual_name__`` (extra underscore) rather than Salt's conventional
    ``__virtualname__`` — confirm the loader picks it up as intended.
    """
    return __virtual_name__
def get(
    hostname,
    username,
    password,
    verify_ssl=True,
    cert=None,
    cert_common_name=None,
    cursor=None,
    included_fields=None,
    page_size=None,
    sort_by=None,
    sort_ascending=None,
):
    """
    Lists all IP Address pools present in the NSX-T Manager

    CLI Example:

    .. code-block:: bash

        salt vm_minion nsxt_ip_pools.get hostname=nsxt-manager.local username=admin ...

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file to connect to NSX-T manager.
        The certificate can be retrieved from browser.

    cert_common_name
        (Optional) Certificate common name to compare against instead of the
        hostname, for host name verification with self-signed certificates.

    cursor
        (Optional) Opaque cursor to be used for getting next page of records
        (supplied by current result page)

    included_fields
        (Optional) Comma separated list of fields that should be included in query result

    page_size
        (Optional) Maximum number of results to return in this page

    sort_by
        (Optional) Field by which records are sorted

    sort_ascending
        (Optional) Boolean value to sort result in ascending order
    """
    log.info("Fetching IP Address Pools")
    # Only forward pagination/sorting options the caller actually provided.
    pagination_params = common._filter_kwargs(
        allowed_kwargs=["cursor", "included_fields", "page_size", "sort_ascending", "sort_by"],
        default_dict=None,
        cursor=cursor,
        included_fields=included_fields,
        page_size=page_size,
        sort_by=sort_by,
        sort_ascending=sort_ascending,
    )
    return nsxt_request.call_api(
        method="get",
        url=IP_POOLS_BASE_URL.format(hostname),
        username=username,
        password=password,
        cert_common_name=cert_common_name,
        verify_ssl=verify_ssl,
        cert=cert,
        params=pagination_params,
    )
def get_by_display_name(
    hostname, username, password, display_name, verify_ssl=True, cert=None, cert_common_name=None
):
    """
    Gets IP Address pool present in the NSX-T Manager with given name.

    CLI Example:

    .. code-block:: bash

        salt vm_minion nsxt_ip_pools.get_by_display_name hostname=nsxt-manager.local username=admin ...

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    display_name
        The name of IP Address pool to fetch

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file to connect to NSX-T manager.
        The certificate can be retrieved from browser.

    cert_common_name
        (Optional) Certificate common name to compare against instead of the
        hostname, for host name verification with self-signed certificates.
    """
    log.info("Finding IP Address Pool with display name: %s", display_name)
    # Walk every page of results, matching on display name.
    matched_pools = common._read_paginated(
        func=get,
        display_name=display_name,
        hostname=hostname,
        username=username,
        password=password,
        verify_ssl=verify_ssl,
        cert=cert,
        cert_common_name=cert_common_name,
    )
    # An error dict from the pagination helper is returned untouched.
    if "error" in matched_pools:
        return matched_pools
    return {"results": matched_pools}
def create(
    hostname,
    username,
    password,
    verify_ssl=True,
    cert=None,
    cert_common_name=None,
    display_name=None,
    description=None,
    tags=None,
    subnets=None,
    ip_release_delay=None,
):
    """
    Creates an IP Address pool with given specifications

    CLI Example:

    .. code-block:: bash

        salt vm_minion nsxt_ip_pools.create hostname=nsxt-manager.local username=admin ...

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file to connect to NSX-T manager.
        The certificate can be retrieved from browser.

    cert_common_name
        (Optional) Certificate common name to compare against instead of the
        hostname, for host name verification with self-signed certificates.

    display_name
        The name using which IP Address Pool will be created. If not provided
        then pool id will be used as display name

    description
        (Optional) description for the IP Address Pool

    tags
        (Optional) Opaque identifiers meaningful to the API user. Maximum 30
        tags can be associated:

        .. code::

            tags='[
                {
                    "tag": "<tag-key-1>"
                    "scope": "<tag-value-1>"
                },
                {
                    "tag": "<tag-key-2>"
                    "scope": "<tag-value-2>"
                }
            ]'

    subnets
        (Optional) The collection of one or more subnet objects in a pool.
        Subnets can be IPv4 or IPv6 and they should not overlap. The maximum
        number will not exceed 5 subnets.

        .. code::

            subnets='[
                {
                    "cidr": "cidr_value",
                    "gateway_ip": "gateway_ip_value",
                    "dns_nameservers": ["dns_nameserver1", "dns_nameserver2"],
                    "allocation_ranges": [
                        {"start": "IP-Address-Range-start", "end": "IP-Address-Range-end"}
                    ]
                }
            ]'

    ip_release_delay
        (Optional) Delay in milliseconds, while releasing allocated IP address
        from IP pool (Default is 2 mins - configured on NSX device).
    """
    log.info("Creating IP Address Pool")
    # Only the fields the caller supplied are sent in the request body.
    payload = common._filter_kwargs(
        allowed_kwargs=["display_name", "description", "subnets", "tags", "ip_release_delay"],
        default_dict=None,
        display_name=display_name,
        description=description,
        tags=tags,
        subnets=subnets,
        ip_release_delay=ip_release_delay,
    )
    return nsxt_request.call_api(
        method="post",
        url=IP_POOLS_BASE_URL.format(hostname),
        username=username,
        password=password,
        cert_common_name=cert_common_name,
        verify_ssl=verify_ssl,
        cert=cert,
        data=payload,
    )
def update(
    ip_pool_id,
    display_name,
    revision,
    hostname,
    username,
    password,
    verify_ssl=True,
    cert=None,
    cert_common_name=None,
    description=None,
    tags=None,
    subnets=None,
    ip_release_delay=None,
):
    """
    Update an existing IP Address pool identified by ``ip_pool_id``.

    Any optional field for which no value is supplied is reset (set to null)
    on the NSX-T manager.

    CLI Example:

    .. code-block:: bash

        salt vm_minion nsxt_ip_pools.update hostname=nsxt-manager.local username=admin ...

    ip_pool_id
        Id of the existing IP Address pool
    display_name
        Existing IP Pool display name. This is a non updatable field
    revision
        Revision number of IP Pool to update
    hostname
        The host name of NSX-T manager
    username
        Username to connect to NSX-T manager
    password
        Password to connect to NSX-T manager
    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.
    cert
        (Optional) Path to the SSL client certificate file to connect to NSX-T manager.
    cert_common_name
        (Optional) Certificate common name to compare against during host name
        verification when it differs from ``hostname`` (e.g. self-signed certificates).
    description
        (Optional) Description for the IP Address Pool
    tags
        (Optional) Opaque identifiers meaningful to the API user. Maximum 30 tags
        can be associated. Each entry is a dict with ``tag`` and ``scope`` keys.
    subnets
        (Optional) The collection of one or more subnet objects in a pool. Subnets
        can be IPv4 or IPv6, must not overlap, and at most 5 are allowed.
    ip_release_delay
        (Optional) Delay in milliseconds while releasing an allocated IP address
        from the IP pool (default is 2 mins).
    """
    log.info("Updating IP Address Pool %s", display_name)
    # Mandatory identity/versioning fields the PUT body must always carry.
    base_fields = {
        "id": ip_pool_id,
        "_revision": revision,
        "display_name": display_name,
    }
    pool_url = "{}/{}".format(IP_POOLS_BASE_URL.format(hostname), ip_pool_id)
    payload = common._filter_kwargs(
        allowed_kwargs=["description", "subnets", "tags", "ip_release_delay"],
        default_dict=base_fields,
        description=description,
        tags=tags,
        subnets=subnets,
        ip_release_delay=ip_release_delay,
    )
    return nsxt_request.call_api(
        method="put",
        url=pool_url,
        username=username,
        password=password,
        cert_common_name=cert_common_name,
        verify_ssl=verify_ssl,
        cert=cert,
        data=payload,
    )
def delete(
    ip_pool_id, hostname, username, password, verify_ssl=True, cert=None, cert_common_name=None
):
    """
    Delete the IP Address pool with the given id.

    CLI Example:

    .. code-block:: bash

        salt vm_minion nsxt_ip_pools.delete hostname=nsxt-manager.local username=admin ...

    ip_pool_id
        Existing IP Pool id
    hostname
        The host name of NSX-T manager
    username
        Username to connect to NSX-T manager
    password
        Password to connect to NSX-T manager
    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.
    cert
        (Optional) Path to the SSL client certificate file to connect to NSX-T manager.
    cert_common_name
        (Optional) Certificate common name to compare against during host name
        verification when it differs from ``hostname`` (e.g. self-signed certificates).
    """
    log.info("Deleting IP Address Pool %s", ip_pool_id)
    pool_url = "{}/{}".format(IP_POOLS_BASE_URL.format(hostname), ip_pool_id)
    result = nsxt_request.call_api(
        method="delete",
        url=pool_url,
        username=username,
        password=password,
        cert_common_name=cert_common_name,
        verify_ssl=verify_ssl,
        cert=cert,
    )
    # A successful DELETE returns an empty/falsy body; report success explicitly.
    if result:
        return result
    return "IP Pool deleted successfully"
import logging
import salt.exceptions
import saltext.vmware.utils.cluster as utils_cluster
import saltext.vmware.utils.common as utils_common
import saltext.vmware.utils.datacenter as utils_datacenter
from saltext.vmware.utils.connect import get_service_instance
log = logging.getLogger(__name__)
try:
from pyVmomi import vim
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
__virtualname__ = "vmware_cluster_drs"
__proxyenabled__ = ["vmware_cluster_drs"]
__func_alias__ = {"get_": "get"}
def __virtual__():
    """Load this module only when pyVmomi imported successfully."""
    if HAS_PYVMOMI:
        return __virtualname__
    return False, "Unable to import pyVmomi module."
def configure(
    cluster,
    datacenter,
    enable=False,
    enable_vm_behavior_overrides=True,
    default_vm_behavior=None,
    vmotion_rate=3,
    advanced_settings=None,
    service_instance=None,
):
    """
    Configure a Distributed Resource Scheduler (DRS) for a given cluster

    Supported proxies: esxcluster

    cluster
        The cluster name

    datacenter
        The datacenter name to which the cluster belongs

    enable
        Enable DRS for the cluster

    enable_vm_behavior_overrides
        Flag that dictates whether DRS Behavior overrides for individual virtual machines are enabled.
        The default value is true.
        When this flag is true, overrides the default_vm_behavior.
        When this flag is false, the default_vm_behavior value applies to all virtual machines.

    default_vm_behavior
        Specifies the cluster-wide default DRS behavior for virtual machines.
        Valid Values:

        - ``fullyAutomated``: Specifies that VirtualCenter should automate both the migration of virtual machines
          and their placement with a host at power on.
        - ``manual``: Specifies that VirtualCenter should generate recommendations for virtual machine migration
          and for placement with a host, but should not implement the recommendations automatically.
        - ``partiallyAutomated``: Specifies that VirtualCenter should generate recommendations for virtual
          machine migration and for placement with a host, but should automatically
          implement only the placement at power on.

    vmotion_rate
        Threshold for generated ClusterRecommendations. DRS generates only those recommendations that are above
        the specified vmotionRate. Ratings vary from 1 to 5. This setting applies to manual, partiallyAutomated,
        and fullyAutomated DRS clusters. 1 - Conservative, 5 - Aggressive. Default is 3.

    advanced_settings
        Advanced options for the cluster, to be passed in as a dictionary.

    service_instance
        (optional) The Service Instance from which to obtain managed object references.

    Returns ``{cluster: True}`` on success or ``{cluster: False, "reason": ...}``
    when the vCenter API call fails.

    .. code-block:: bash

        salt '*' vmware_cluster_drs.configure cluster1 dc1 enable=True
    """
    if service_instance is None:
        service_instance = get_service_instance(opts=__opts__, pillar=__pillar__)
    try:
        # Resolve the datacenter first, then the cluster inside it.
        dc_ref = utils_datacenter.get_datacenter(service_instance, datacenter)
        cluster_ref = utils_cluster.get_cluster(dc_ref=dc_ref, cluster=cluster)
        # Build a fresh DRS config spec; unset attributes are left to vCenter defaults.
        cluster_spec = vim.cluster.ConfigSpecEx()
        cluster_spec.drsConfig = vim.cluster.DrsConfigInfo()
        cluster_spec.drsConfig.enabled = enable
        cluster_spec.drsConfig.enableVmBehaviorOverrides = enable_vm_behavior_overrides
        cluster_spec.drsConfig.defaultVmBehavior = default_vm_behavior
        # The API's vmotionRate scale is inverted relative to the 1-5 scale this
        # module exposes; get() below applies the same ``6 - x`` mapping to read it back.
        cluster_spec.drsConfig.vmotionRate = 6 - vmotion_rate
        cluster_spec.drsConfig.option = []
        for key in advanced_settings or {}:
            cluster_spec.drsConfig.option.append(
                vim.OptionValue(key=key, value=advanced_settings[key])
            )
        utils_cluster.update_cluster(cluster_ref=cluster_ref, cluster_spec=cluster_spec)
    except (salt.exceptions.VMwareApiError, salt.exceptions.VMwareRuntimeError) as exc:
        # Report failures as data instead of raising, matching get() below.
        return {cluster: False, "reason": str(exc)}
    return {cluster: True}
def get(cluster_name, datacenter_name, service_instance=None):
    """
    Return the DRS configuration of a cluster in a datacenter.

    cluster_name
        The cluster name

    datacenter_name
        The datacenter name to which the cluster belongs

    service_instance
        Use this vCenter service connection instance instead of creating a new one. (optional).

    .. code-block:: bash

        salt '*' vmware_cluster_drs.get cluster_name=cl1 datacenter_name=dc1
    """
    if service_instance is None:
        service_instance = get_service_instance(opts=__opts__, pillar=__pillar__)
    drs_info = {}
    try:
        dc_ref = utils_datacenter.get_datacenter(service_instance, datacenter_name)
        cluster_ref = utils_cluster.get_cluster(dc_ref=dc_ref, cluster=cluster_name)
        drs_config = cluster_ref.configurationEx.drsConfig
        drs_info["enabled"] = drs_config.enabled
        drs_info["enable_vm_behavior_overrides"] = drs_config.enableVmBehaviorOverrides
        drs_info["default_vm_behavior"] = drs_config.defaultVmBehavior
        # Invert the API's rate scale back to this module's 1-5 convention.
        drs_info["vmotion_rate"] = 6 - drs_config.vmotionRate
        drs_info["advanced_settings"] = {opt.key: opt.value for opt in drs_config.option}
    except (salt.exceptions.VMwareApiError, salt.exceptions.VMwareRuntimeError) as exc:
        return {cluster_name: False, "reason": str(exc)}
    return drs_info
def vm_affinity_rule(
    name,
    affinity,
    vm_names,
    cluster_name,
    datacenter_name,
    enabled=True,
    mandatory=None,
    service_instance=None,
):
    """
    Configure a virtual machine to virtual machine DRS rule.

    Creates the rule if no rule named ``name`` exists, otherwise updates it in
    place. An existing rule's affinity type cannot be changed.

    name
        The name of the rule.

    affinity
        (boolean) Describes whether to make affinity or anti affinity rule.

    vm_names
        List of virtual machines associated with DRS rule.

    cluster_name
        The name of the cluster to configure a rule on.

    datacenter_name
        The name of the datacenter where the cluster exists.

    enabled
        (optional, boolean) Enable the DRS rule being created. Defaults to True.

    mandatory
        (optional, boolean) Sets whether the rule being created is mandatory. Defaults to False.

    service_instance
        (optional) The Service Instance from which to obtain managed object references.

    Raises ``VMwareApiError`` when any of ``vm_names`` cannot be found.
    """
    # Lazy %-formatting so the message is only built when debug logging is on.
    log.debug("Configuring a vm to vm DRS rule %s on cluster %s.", name, cluster_name)
    if service_instance is None:
        service_instance = get_service_instance(opts=__opts__, pillar=__pillar__)
    dc_ref = utils_common.get_datacenter(service_instance, datacenter_name)
    cluster_ref = utils_cluster.get_cluster(dc_ref, cluster_name)
    vm_refs = []
    missing_vms = []
    for vm_name in vm_names:
        vm_ref = utils_common.get_mor_by_property(service_instance, vim.VirtualMachine, vm_name)
        # Only keep resolved references; previously a None was appended for
        # every missing VM, relying on the raise below to never use the list.
        if vm_ref:
            vm_refs.append(vm_ref)
        else:
            missing_vms.append(vm_name)
    if missing_vms:
        # Pass a string, not a set literal, so the error message renders cleanly.
        raise salt.exceptions.VMwareApiError(f"Could not find virtual machines {missing_vms}")
    rules = cluster_ref.configuration.rule
    rule_ref = None
    if rules:
        for rule in rules:
            if rule.name == name:
                # Renamed from ``rule_info`` to avoid shadowing the module-level
                # rule_info() function defined below.
                existing_rule_info = utils_cluster.drs_rule_info(rule)
                if utils_cluster.check_affinity(rule) != affinity:
                    return {
                        "updated": False,
                        "message": f"Existing rule of name {name} has an affinity of {not affinity} and cannot be changed, make new rule.",
                    }
                if (
                    existing_rule_info["vms"] == vm_names
                    and existing_rule_info["enabled"] == enabled
                    and existing_rule_info["mandatory"] == mandatory
                ):
                    return {
                        "updated": True,
                        "message": "Exact rule already exists.",
                    }
                rule_ref = rule
    if rule_ref:
        utils_cluster.update_drs_rule(rule_ref, vm_refs, enabled, mandatory, cluster_ref)
        return {"updated": True}
    utils_cluster.create_drs_rule(name, affinity, vm_refs, enabled, mandatory, cluster_ref)
    return {"created": True}
def rule_info(cluster_name, datacenter_name, rule_name=None, service_instance=None):
    """
    Return a list of all the DRS rules on a given cluster, or one DRS rule if filtered by rule_name.

    cluster_name
        The name of the cluster to get rules from.

    datacenter_name
        The name of the datacenter where the cluster exists.

    rule_name
        (optional) Return only the rule with rule_name.

    service_instance
        (optional) The Service Instance from which to obtain managed object references.

    Raises ``VMwareApiError`` when ``rule_name`` is given but no such rule exists.
    """
    # Lazy %-formatting instead of eagerly building an f-string.
    log.debug("Getting rules info on cluster %s.", cluster_name)
    if service_instance is None:
        service_instance = get_service_instance(opts=__opts__, pillar=__pillar__)
    dc_ref = utils_common.get_datacenter(service_instance, datacenter_name)
    cluster_ref = utils_cluster.get_cluster(dc_ref, cluster_name)
    rules = cluster_ref.configuration.rule
    if rule_name:
        for rule in rules:
            if rule.name == rule_name:
                return utils_cluster.drs_rule_info(rule)
        # Pass a string, not a set literal, so the error message renders cleanly.
        raise salt.exceptions.VMwareApiError(f"Rule name {rule_name} not found.")
    return [utils_cluster.drs_rule_info(rule) for rule in rules]
import logging
from saltext.vmware.utils import vmc_constants
from saltext.vmware.utils import vmc_request
from saltext.vmware.utils import vmc_vcenter_request
log = logging.getLogger(__name__)
__virtualname__ = "vmc_vcenter_stats"
def __virtual__():
    """Always load this module under its virtual name."""
    return __virtualname__
def list_monitored_items(hostname, username, password, verify_ssl=True, cert=None):
    """
    Retrieve the list of items that can be monitored for the given vCenter.

    CLI Example:

    .. code-block:: bash

        salt vm_minion vmc_vcenter_stats.list_monitored_items hostname=sample-vcenter.vmwarevmc.com ...

    hostname
        Hostname of the vCenter console

    username
        admin username required to login to vCenter console

    password
        admin password required to login to vCenter console

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file to connect to VMC Cloud Console.
        The certificate can be retrieved from browser.
    """
    log.info("Retrieving the monitored items list for vCenter")
    monitoring_url = "{base_url}api/appliance/monitoring".format(
        base_url=vmc_request.set_base_url(hostname)
    )
    auth_headers = vmc_vcenter_request.get_headers(hostname, username, password)
    return vmc_vcenter_request.call_api(
        method=vmc_constants.GET_REQUEST_METHOD,
        url=monitoring_url,
        headers=auth_headers,
        description="vmc_vcenter_stats.list_monitored_items",
        verify_ssl=verify_ssl,
        cert=cert,
    )
def query_monitored_items(
    hostname,
    username,
    password,
    start_time,
    end_time,
    interval,
    aggregate_function,
    monitored_items,
    verify_ssl=True,
    cert=None,
):
    """
    Retrieve aggregate monitoring data for the given ``monitored_items`` across the time range.

    Data will be grouped using the ``aggregate_function`` for each ``interval`` in the time range.

    CLI Example:

    .. code-block:: bash

        salt vm_minion vmc_vcenter_stats.query_monitored_items hostname=sample-vcenter.vmwarevmc.com ...

    hostname
        Hostname of the vCenter console

    username
        admin username required to login to vCenter console

    password
        admin password required to login to vCenter console

    start_time
        Start time in UTC (inclusive). Ex: 2021-05-06T22:13:05.651Z

    end_time
        End time in UTC (inclusive). Ex: 2021-05-10T22:13:05.651Z

    interval
        interval between the values in hours and mins, for which aggregation will apply.
        Possible values: MINUTES30, HOURS2, MINUTES5, DAY1, HOURS6

    aggregate_function
        aggregation function. Possible values: COUNT, MAX, AVG, MIN

    monitored_items
        List of monitored item IDs. Ex: [cpu.util, mem.util]

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file to connect to VMC Cloud Console.
        The certificate can be retrieved from browser.
    """
    # Lazy %-args: the message is only interpolated when INFO logging is enabled,
    # replacing the eager str.format() + log.info(msg) pattern.
    log.info("Retrieving the vCenter monitoring data for %s", monitored_items)
    api_url_base = vmc_request.set_base_url(hostname)
    api_url = "{base_url}api/appliance/monitoring/query".format(base_url=api_url_base)
    headers = vmc_vcenter_request.get_headers(hostname, username, password)
    params = {
        "start_time": start_time,
        "end_time": end_time,
        "function": aggregate_function,
        "interval": interval,
        "names": monitored_items,
    }
    return vmc_vcenter_request.call_api(
        method=vmc_constants.GET_REQUEST_METHOD,
        url=api_url,
        headers=headers,
        description="vmc_vcenter_stats.query_monitored_items",
        verify_ssl=verify_ssl,
        cert=cert,
        params=params,
    )
import logging
import salt.exceptions
import salt.utils.platform
import saltext.vmware.utils.common as utils_common
import saltext.vmware.utils.connect as connect
import saltext.vmware.utils.datastore as utils_datastore
import saltext.vmware.utils.vm as utils_vm
log = logging.getLogger(__name__)
try:
from pyVmomi import vim
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
__virtualname__ = "vmware_vm"
__proxyenabled__ = ["vmware_vm"]
__func_alias__ = {"list_": "list"}
def __virtual__():
    """Always load this module under its virtual name."""
    return __virtualname__
def list_(service_instance=None):
    """
    Return the names of all virtual machines.

    service_instance
        (optional) The Service Instance from which to obtain managed object references.
    """
    si = service_instance
    if si is None:
        si = connect.get_service_instance(opts=__opts__, pillar=__pillar__)
    return utils_vm.list_vms(si)
def list_templates(service_instance=None):
    """
    Return the names of all virtual machine templates.

    service_instance
        (optional) The Service Instance from which to obtain managed object references.
    """
    si = service_instance
    if si is None:
        si = connect.get_service_instance(opts=__opts__, pillar=__pillar__)
    return utils_vm.list_vm_templates(si)
def path(vm_name, service_instance=None):
    """
    Return the inventory path of the specified virtual machine.

    vm_name
        The name of the virtual machine.

    service_instance
        (optional) The Service Instance from which to obtain managed object references.
    """
    si = service_instance
    if si is None:
        si = connect.get_service_instance(opts=__opts__, pillar=__pillar__)
    vm_ref = utils_common.get_mor_by_property(si, vim.VirtualMachine, vm_name)
    return utils_common.get_path(vm_ref, si)
def _deploy_ovf(name, host_name, ovf, service_instance=None):
    """
    Helper function that creates a virtual machine from an OVF descriptor.

    Returns the created virtual machine reference.

    name
        The name of the virtual machine to be created.

    host_name
        The name of the esxi host to create the virtual machine on.

    ovf
        The Open Virtualization Format descriptor content containing a
        configuration of a virtual machine.

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises ``CommandExecutionError`` if a VM with ``name`` already exists and
    ``VMwareApiError`` if the import spec cannot be created.
    """
    if service_instance is None:
        service_instance = connect.get_service_instance(opts=__opts__, pillar=__pillar__)
    vms = list_(service_instance)
    if name in vms:
        raise salt.exceptions.CommandExecutionError("Duplicate virtual machine name.")
    content = service_instance.content
    manager = content.ovfManager
    spec_params = vim.OvfManager.CreateImportSpecParams(entityName=name)
    resources = utils_common.deployment_resources(host_name, service_instance)
    # Translate the OVF descriptor into an import spec targeting the host's
    # first datastore and the resolved resource pool.
    import_spec = manager.CreateImportSpec(
        ovf, resources["resource_pool"], resources["destination_host"].datastore[0], spec_params
    )
    # CreateImportSpec reports problems via the spec's error list rather than raising.
    errors = [e.msg for e in import_spec.error]
    if errors:
        log.exception(errors)
        raise salt.exceptions.VMwareApiError(errors)
    vm_ref = utils_vm.create_vm(
        name,
        import_spec.importSpec.configSpec,
        resources["datacenter"].vmFolder,
        resources["resource_pool"],
        resources["destination_host"],
    )
    return vm_ref
def deploy_ovf(vm_name, host_name, ovf_path, service_instance=None):
    """
    Deploy a virtual machine from an OVF file.

    vm_name
        The name of the virtual machine to be created.

    host_name
        The name of the esxi host to create the virtual machine on.

    ovf_path
        The path to the Open Virtualization Format that contains a configuration of a virtual machine.

    service_instance
        (optional) The Service Instance from which to obtain managed object references.
    """
    descriptor = utils_vm.read_ovf_file(ovf_path)
    _deploy_ovf(vm_name, host_name, descriptor, service_instance)
    return {"deployed": True}
def deploy_ova(vm_name, host_name, ova_path, service_instance=None):
    """
    Deploy a virtual machine from an OVA archive.

    vm_name
        The name of the virtual machine to be created.

    host_name
        The name of the esxi host to create the virtual machine on.

    ova_path
        The path to the Open Virtualization Appliance that contains a compressed configuration of a virtual machine.

    service_instance
        (optional) The Service Instance from which to obtain managed object references.
    """
    descriptor = utils_vm.read_ovf_from_ova(ova_path)
    _deploy_ovf(vm_name, host_name, descriptor, service_instance)
    return {"deployed": True}
def deploy_template(vm_name, template_name, host_name, service_instance=None):
    """
    Deploy a virtual machine by cloning a template virtual machine.

    vm_name
        The name of the virtual machine to be created.

    template_name
        The name of the template to clone from.

    host_name
        The name of the esxi host to create the virtual machine on.

    service_instance
        (optional) The Service Instance from which to obtain managed object references.

    Raises ``CommandExecutionError`` when ``vm_name`` already exists or the
    template cannot be found.
    """
    if service_instance is None:
        service_instance = connect.get_service_instance(opts=__opts__, pillar=__pillar__)
    if vm_name in list_(service_instance):
        raise salt.exceptions.CommandExecutionError("Duplicate virtual machine name.")
    if template_name not in list_templates(service_instance):
        raise salt.exceptions.CommandExecutionError("Template does not exist.")
    template_ref = utils_common.get_mor_by_property(
        service_instance, vim.VirtualMachine, template_name
    )
    resources = utils_common.deployment_resources(host_name, service_instance)
    relocate_spec = vim.vm.RelocateSpec()
    relocate_spec.pool = resources["resource_pool"]
    clone_spec = vim.vm.CloneSpec()
    clone_spec.location = relocate_spec
    utils_vm.clone_vm(vm_name, resources["datacenter"].vmFolder, template_ref, clone_spec)
    return {"deployed": True}
def info(vm_name=None, service_instance=None):
    """
    Return basic info about one vSphere VM guest, or about all guests when
    ``vm_name`` is omitted.

    vm_name
        (optional) The name of the virtual machine to get info on.

    service_instance
        (optional) The Service Instance from which to obtain managed object references.
    """
    if service_instance is None:
        service_instance = connect.get_service_instance(opts=__opts__, pillar=__pillar__)
    if vm_name:
        vm_refs = [
            utils_common.get_mor_by_property(service_instance, vim.VirtualMachine, vm_name)
        ]
    else:
        # Walk every datacenter's top-level VM folder for VirtualMachine entities.
        vm_refs = [
            entity
            for dc in service_instance.content.rootFolder.childEntity
            for entity in dc.vmFolder.childEntity
            if isinstance(entity, vim.VirtualMachine)
        ]
    result = {}
    for vm in vm_refs:
        summary = vm.summary
        result[summary.config.name] = {
            "guest_name": summary.config.name,
            "guest_fullname": summary.guest.guestFullName,
            "power_state": summary.runtime.powerState,
            "ip_address": summary.guest.ipAddress,
            "mac_address": utils_vm.get_mac_address(vm),
            "uuid": summary.config.uuid,
            "vm_network": utils_vm.get_network(vm),
            "esxi_hostname": summary.runtime.host.name,
            "datacenter": utils_common.get_parent_type(vm, vim.Datacenter).name,
            "cluster": summary.runtime.host.parent.name,
            "tags": [tag.name for tag in vm.tag],
            "folder": utils_common.get_path(vm, service_instance),
            "moid": vm._moId,
        }
    return result
def power_state(vm_name, state, datacenter_name=None, service_instance=None):
    """
    Manage the power state of a virtual machine.

    vm_name
        The name of the virtual machine.

    state
        The state you want the specified virtual machine in (powered-on,powered-off,suspend,reset).

    datacenter_name
        (optional) The name of the datacenter containing the virtual machine you want to manage.

    service_instance
        (optional) The Service Instance from which to obtain managed object references.

    Returns a dict with a ``comment`` and the resulting power ``state``.
    """
    # Lazy %-args so the message is only built when TRACE logging is enabled.
    log.trace("Managing power state of virtual machine %s to %s", vm_name, state)
    if service_instance is None:
        service_instance = connect.get_service_instance(opts=__opts__, pillar=__pillar__)
    if datacenter_name:
        dc_ref = utils_common.get_mor_by_property(service_instance, vim.Datacenter, datacenter_name)
        vm_ref = utils_common.get_mor_by_property(
            service_instance, vim.VirtualMachine, vm_name, "name", dc_ref
        )
    else:
        vm_ref = utils_common.get_mor_by_property(service_instance, vim.VirtualMachine, vm_name)
    # Map requested state -> (vSphere runtime power state, human-readable wording).
    # Collapses the three duplicated "already in state" branches; "reset" is
    # intentionally absent so it always triggers a power cycle.
    already_in_state = {
        "powered-on": ("poweredOn", "powered on"),
        "powered-off": ("poweredOff", "powered off"),
        "suspend": ("suspended", "suspended"),
    }
    expected = already_in_state.get(state)
    if expected and vm_ref.summary.runtime.powerState == expected[0]:
        return {
            "comment": f"Virtual machine is already {expected[1]}",
            "changes": {"state": vm_ref.summary.runtime.powerState},
        }
    result_ref_vm = utils_vm.power_cycle_vm(vm_ref, state)
    return {
        "comment": f"Virtual machine {state} action succeeded",
        "changes": {"state": result_ref_vm.summary.runtime.powerState},
    }
def boot_manager(
    vm_name,
    order=None,
    delay=0,
    enter_bios_setup=False,
    retry_delay=0,
    efi_secure_boot_enabled=False,
    service_instance=None,
):
    """
    Manage boot options for a virtual machine.

    vm_name
        The name of the virtual machine.

    order
        (List of strings, optional) Boot order of devices. Acceptable strings: cdrom, disk,
        ethernet, floppy. Defaults to ``["cdrom", "disk", "ethernet", "floppy"]``.

    delay
        (integer, optional) Boot delay. When powering on or resetting, delay boot order by given milliseconds. Defaults to 0.

    enter_bios_setup
        (boolean, optional) During the next boot, force entry into the BIOS setup screen. Defaults to False.

    retry_delay
        (integer, optional) If the VM fails to find boot device, automatically retry after given
        milliseconds. Defaults to 0; a non-zero value also enables boot retry.

    efi_secure_boot_enabled
        (boolean, optional) Defaults to False.

    service_instance
        (optional) The Service Instance from which to obtain managed object references.
    """
    # A mutable list default would be shared across calls; build it per call instead.
    if order is None:
        order = ["cdrom", "disk", "ethernet", "floppy"]
    if service_instance is None:
        service_instance = connect.get_service_instance(opts=__opts__, pillar=__pillar__)
    vm = utils_common.get_mor_by_property(service_instance, vim.VirtualMachine, vm_name)
    boot_order_list = utils_vm.options_order_list(vm, order)
    # we removed the ability to individually set bootRetryEnabled, easily implemented if asked for
    input_opts = {
        "bootOrder": boot_order_list,
        "bootDelay": delay,
        "enterBIOSSetup": enter_bios_setup,
        "bootRetryEnabled": bool(retry_delay),
        "bootRetryDelay": retry_delay,
        "efiSecureBootEnabled": efi_secure_boot_enabled,
    }
    if utils_vm.compare_boot_options(input_opts, vm.config.bootOptions):
        return {"status": "already configured this way"}
    ret = utils_vm.change_boot_options(vm, input_opts)
    return ret
def create_snapshot(
    vm_name,
    snapshot_name,
    description="",
    include_memory=False,
    quiesce=False,
    datacenter_name=None,
    service_instance=None,
):
    """
    Create a snapshot of the given virtual machine.

    vm_name
        The name of the virtual machine.

    snapshot_name
        The name for the snapshot being created. Not unique.

    description
        Description for the snapshot.

    include_memory
        (boolean, optional) If TRUE, a dump of the internal state of the virtual machine (basically a memory dump) is included in the snapshot.

    quiesce
        (boolean, optional) If TRUE and the virtual machine is powered on when the snapshot is taken, VMware Tools is used to quiesce the file system in the virtual machine.

    datacenter_name
        (optional) The name of the datacenter containing the virtual machine.

    service_instance
        (optional) The Service Instance from which to obtain managed object references.
    """
    if service_instance is None:
        service_instance = connect.get_service_instance(opts=__opts__, pillar=__pillar__)
    # Scope the VM lookup to the datacenter when one was supplied.
    lookup_args = [service_instance, vim.VirtualMachine, vm_name]
    if datacenter_name:
        dc_ref = utils_common.get_mor_by_property(service_instance, vim.Datacenter, datacenter_name)
        lookup_args += ["name", dc_ref]
    vm_ref = utils_common.get_mor_by_property(*lookup_args)
    new_snap = utils_vm.create_snapshot(vm_ref, snapshot_name, description, include_memory, quiesce)
    status = "created" if isinstance(new_snap, vim.vm.Snapshot) else "failed to create"
    return {"snapshot": status}
def destroy_snapshot(
    vm_name,
    snapshot_name,
    snapshot_id=None,
    remove_children=False,
    datacenter_name=None,
    service_instance=None,
):
    """
    Destroy a snapshot of the given virtual machine.

    vm_name
        The name of the virtual machine.

    snapshot_name
        The name for the snapshot being destroyed. Not unique.

    snapshot_id
        (optional) ID of snapshot to be destroyed.

    remove_children
        (optional, Bool) Remove snapshots below snapshot being removed in tree.

    datacenter_name
        (optional) The name of the datacenter containing the virtual machine.

    service_instance
        (optional) The Service Instance from which to obtain managed object references.
    """
    if service_instance is None:
        service_instance = connect.get_service_instance(opts=__opts__, pillar=__pillar__)
    # Scope the VM lookup to the datacenter when one was supplied.
    lookup_args = [service_instance, vim.VirtualMachine, vm_name]
    if datacenter_name:
        dc_ref = utils_common.get_mor_by_property(service_instance, vim.Datacenter, datacenter_name)
        lookup_args += ["name", dc_ref]
    vm_ref = utils_common.get_mor_by_property(*lookup_args)
    snap = utils_vm.get_snapshot(vm_ref, snapshot_name, snapshot_id)
    utils_vm.destroy_snapshot(snap.snapshot, remove_children)
    return {"snapshot": "destroyed"}
def snapshot(vm_name, datacenter_name=None, service_instance=None):
    """
    Return info about a virtual machine's snapshots.

    vm_name
        The name of the virtual machine to get snapshot info on.

    datacenter_name
        (optional) The name of the datacenter containing the virtual machine.

    service_instance
        (optional) The Service Instance from which to obtain managed object references.
    """
    if service_instance is None:
        service_instance = connect.get_service_instance(opts=__opts__, pillar=__pillar__)
    # Scope the VM lookup to the datacenter when one was supplied.
    lookup_args = [service_instance, vim.VirtualMachine, vm_name]
    if datacenter_name:
        dc_ref = utils_common.get_mor_by_property(service_instance, vim.Datacenter, datacenter_name)
        lookup_args += ["name", dc_ref]
    vm_ref = utils_common.get_mor_by_property(*lookup_args)
    return {"snapshots": utils_vm.get_snapshots(vm_ref)}
def relocate(vm_name, new_host_name, datastore_name, service_instance=None, datacenter_name=None):
    """
    Relocate a virtual machine to the specified host and datastore.

    vm_name
        The name of the virtual machine to relocate.

    new_host_name
        The name of the host you want to move the virtual machine to.

    datastore_name
        The name of the datastore you want to move the virtual machine to.

    service_instance
        (optional) The Service Instance from which to obtain managed object references.

    datacenter_name
        (optional) The name of the datacenter to scope the datastore lookup to.
    """
    if service_instance is None:
        service_instance = connect.get_service_instance(opts=__opts__, pillar=__pillar__)
    # Explicit validation instead of ``assert``, which is stripped under ``python -O``.
    if not isinstance(datastore_name, str):
        raise salt.exceptions.CommandExecutionError("datastore_name must be a string.")
    vm_ref = utils_common.get_mor_by_property(service_instance, vim.VirtualMachine, vm_name)
    resources = utils_common.deployment_resources(new_host_name, service_instance)
    # BUG FIX: ``datacenter_name`` was previously referenced without being
    # defined anywhere, which raised NameError; it is now an optional parameter.
    datastores = utils_datastore.get_datastores(
        service_instance, datastore_name=datastore_name, datacenter_name=datacenter_name
    )
    datastore_ref = datastores[0] if datastores else None
    ret = utils_vm.relocate(
        vm_ref, resources["destination_host"], datastore_ref, resources["resource_pool"]
    )
    if ret == "success":
        return {"virtual_machine": "moved"}
    return {"virtual_machine": "failed to move"}
import logging
from saltext.vmware.utils import common
from saltext.vmware.utils import nsxt_request
log = logging.getLogger(__name__)
__virtual_name__ = "nsxt_uplink_profiles"
def __virtual__():
    """Always load this module under its virtual name."""
    return __virtual_name__
UPLINK_PROFILES_BASE_URL = "https://{0}/api/v1/host-switch-profiles"
create_params_for_uplink_profies = [
"lags",
"mtu",
"named_teamings",
"overlay_encap",
"required_capabilities",
"tags",
"transport_vlan",
"description",
]
def get(
    hostname,
    username,
    password,
    verify_ssl=True,
    cert=None,
    cert_common_name=None,
    cursor=None,
    deployment_type=None,
    hostswitch_profile_type=None,
    include_system_owned=None,
    included_fields=None,
    node_type=None,
    page_size=None,
    sort_ascending=None,
    sort_by=None,
    uplink_teaming_policy_name=None,
):
    """
    List NSX-T uplink profiles.

    CLI Example:

    .. code-block:: bash

        salt vm_minion nsxt_uplink_profiles.get hostname=nsxt-manager.local username=admin ...

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    verify_ssl
        (Optional) Enable/disable SSL verification. Enabled by default. If set
        to False, certificate validation is skipped.

    cert
        (Optional) Path to the SSL certificate file used to connect to the
        NSX-T manager.

    cert_common_name
        (Optional) Common name to compare against the certificate when it
        differs from ``hostname`` (e.g. self-signed certificates).

    cursor
        (Optional) Opaque cursor for fetching the next page of records
        (supplied by the current result page).

    deployment_type
        (Optional) Deployment type of EdgeNode or PublicCloudGatewayNode.
        Together with ``node_type``, filters uplink profiles applicable to
        PHYSICAL_MACHINE or VIRTUAL_MACHINE deployments.

    hostswitch_profile_type
        (Optional) Type of host switch profile.

    include_system_owned
        (Optional) Boolean. Whether the list result contains system resources.

    included_fields
        (Optional) Comma separated list of fields to include in the result.

    node_type
        (Optional) Fabric node type (resource_type of the Node, e.g. EdgeNode,
        PublicCloudGatewayNode) for which uplink profiles are listed.

    page_size
        (Optional) Maximum number of results to return in this page.

    sort_ascending
        (Optional) Boolean.

    sort_by
        (Optional) Field by which records are sorted.

    uplink_teaming_policy_name
        (Optional) If set, only UplinkHostSwitchProfiles with this uplink
        teaming policy name are returned.
    """
    log.info("Fetching NSX-T uplink profiles")
    # Candidate query parameters; _filter_kwargs drops the unset ones.
    profile_filters = {
        "cursor": cursor,
        "deployment_type": deployment_type,
        "hostswitch_profile_type": hostswitch_profile_type,
        "include_system_owned": include_system_owned,
        "included_fields": included_fields,
        "node_type": node_type,
        "page_size": page_size,
        "sort_ascending": sort_ascending,
        "sort_by": sort_by,
        "uplink_teaming_policy_name": uplink_teaming_policy_name,
    }
    query_params = common._filter_kwargs(
        allowed_kwargs=list(profile_filters),
        default_dict=None,
        **profile_filters,
    )
    return nsxt_request.call_api(
        method="get",
        url=UPLINK_PROFILES_BASE_URL.format(hostname),
        username=username,
        password=password,
        cert_common_name=cert_common_name,
        verify_ssl=verify_ssl,
        cert=cert,
        params=query_params,
    )
def get_by_display_name(
    hostname,
    username,
    password,
    display_name,
    verify_ssl=True,
    cert=None,
    cert_common_name=None,
):
    """
    Return uplink profiles (UplinkHostSwitchProfile) on the NSX-T Manager that
    match the given display_name.

    CLI Example:

    .. code-block:: bash

        salt vm_minion nsxt_uplink_profiles.get_by_display_name hostname=nsxt-manager.local username=admin ...

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    display_name
        The name of the uplink profile

    verify_ssl
        (Optional) Enable/disable SSL verification. Enabled by default. If set
        to False, certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file used to connect to
        the NSX-T manager.

    cert_common_name
        (Optional) Common name to compare against the certificate when it
        differs from ``hostname`` (e.g. self-signed certificates).
    """
    log.info("Finding uplink profiles with display name: %s", display_name)
    # _read_paginated walks every result page of get() and collects matches.
    matches = common._read_paginated(
        func=get,
        display_name=display_name,
        hostname=hostname,
        username=username,
        password=password,
        verify_ssl=verify_ssl,
        cert=cert,
        cert_common_name=cert_common_name,
    )
    # Propagate API errors unchanged; otherwise wrap the list for callers.
    return matches if "error" in matches else {"results": matches}
def create(
    hostname,
    username,
    password,
    display_name,
    teaming,
    verify_ssl=True,
    cert=None,
    cert_common_name=None,
    lags=None,
    mtu=None,
    named_teamings=None,
    overlay_encap=None,
    required_capabilities=None,
    tags=None,
    transport_vlan=None,
    description=None,
):
    """
    Create an uplink profile (resource_type: UplinkHostSwitchProfile).

    CLI Example:

    .. code-block:: bash

        salt vm_minion nsxt_uplink_profiles.create hostname=nsxt-manager.local username=admin ...

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    display_name
        The name of the uplink profile

    teaming
        Default TeamingPolicy associated with this UplinkProfile. An object
        with ``policy`` (one of FAILOVER_ORDER, LOADBALANCE_SRCID,
        LOADBALANCE_SRC_MAC), ``active_list`` and ``standby_list``; the lists
        hold Uplink objects with ``uplink_name`` and ``uplink_type``
        (PNIC or LAG). Example:

        .. code::

            {'standby_list':[],'active_list':[{'uplink_name':'uplink3','uplink_type':'PNIC'}],'policy':'FAILOVER_ORDER'}

    verify_ssl
        (Optional) Enable/disable SSL verification. Enabled by default. If set
        to False, certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file used to connect to
        the NSX-T manager.

    cert_common_name
        (Optional) Common name to compare against the certificate when it
        differs from ``hostname`` (e.g. self-signed certificates).

    lags
        (Optional) List of LACP groups.

    mtu
        (Optional) Maximum Transmission Unit used for uplinks.

    named_teamings
        (Optional) List of named uplink teaming policies usable by logical
        switches. Array of NamedTeamingPolicy.

    overlay_encap
        (Optional) The protocol used to encapsulate overlay traffic.

    required_capabilities
        (Optional) List of strings.

    tags
        (Optional) Opaque identifiers meaningful to the API user. Array of Tag.

    transport_vlan
        (Optional) VLAN used for tagging overlay traffic of the associated
        HostSwitch. Integer.

    description
        (Optional) Description for the resource.
    """
    log.info("Creating nsxt uplink profile")
    # Optional body fields; _filter_kwargs drops unset values.
    optional_fields = {
        "lags": lags,
        "mtu": mtu,
        "named_teamings": named_teamings,
        "overlay_encap": overlay_encap,
        "required_capabilities": required_capabilities,
        "tags": tags,
        "transport_vlan": transport_vlan,
        "description": description,
    }
    payload = common._filter_kwargs(
        allowed_kwargs=create_params_for_uplink_profies,
        default_dict={},
        **optional_fields,
    )
    # Mandatory fields for an UplinkHostSwitchProfile.
    payload.update(
        display_name=display_name,
        resource_type="UplinkHostSwitchProfile",
        teaming=teaming,
    )
    return nsxt_request.call_api(
        method="post",
        url=UPLINK_PROFILES_BASE_URL.format(hostname),
        username=username,
        password=password,
        cert_common_name=cert_common_name,
        verify_ssl=verify_ssl,
        cert=cert,
        data=payload,
    )
def update(
    hostname,
    username,
    password,
    display_name,
    teaming,
    uplink_profile_id,
    revision,
    verify_ssl=True,
    cert=None,
    cert_common_name=None,
    lags=None,
    mtu=None,
    named_teamings=None,
    overlay_encap=None,
    required_capabilities=None,
    tags=None,
    transport_vlan=None,
    description=None,
):
    """
    Update an existing uplink profile (resource_type: UplinkHostSwitchProfile).

    CLI Example:

    .. code-block:: bash

        salt vm_minion nsxt_uplink_profiles.update hostname=nsxt-manager.local username=admin ...

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    display_name
        The name of the uplink profile

    teaming
        Default TeamingPolicy associated with this UplinkProfile. An object
        with ``policy`` (one of FAILOVER_ORDER, LOADBALANCE_SRCID,
        LOADBALANCE_SRC_MAC), ``active_list`` and ``standby_list``; the lists
        hold Uplink objects with ``uplink_name`` and ``uplink_type``
        (PNIC or LAG). Example:

        .. code::

            {'standby_list':[],'active_list':[{'uplink_name':'uplink3','uplink_type':'PNIC'}],'policy':'FAILOVER_ORDER'}

    uplink_profile_id
        Unique id provided by NSX-T for the uplink profile.

    revision
        ``_revision`` property of the uplink profile provided by NSX-T.

    verify_ssl
        Enable/disable SSL verification. Enabled by default. If set to False,
        certificate validation is skipped.

    cert
        Path to the SSL client certificate file used to connect to the NSX-T
        manager.

    cert_common_name
        (Optional) Common name to compare against the certificate when it
        differs from ``hostname`` (e.g. self-signed certificates).

    lags
        (Optional) List of LACP groups.

    mtu
        (Optional) Maximum Transmission Unit used for uplinks.

    named_teamings
        (Optional) List of named uplink teaming policies usable by logical
        switches. Array of NamedTeamingPolicy.

    overlay_encap
        (Optional) The protocol used to encapsulate overlay traffic.

    required_capabilities
        (Optional) List of strings.

    tags
        (Optional) Opaque identifiers meaningful to the API user. Array of Tag.

    transport_vlan
        (Optional) VLAN used for tagging overlay traffic of the associated
        HostSwitch. Integer.

    description
        (Optional) Description for the resource.
    """
    log.info("Updating nsxt uplink profile %s", display_name)
    profile_url = "{}/{}".format(UPLINK_PROFILES_BASE_URL.format(hostname), uplink_profile_id)
    # Optional body fields; _filter_kwargs drops unset values.
    optional_fields = {
        "lags": lags,
        "mtu": mtu,
        "named_teamings": named_teamings,
        "overlay_encap": overlay_encap,
        "required_capabilities": required_capabilities,
        "tags": tags,
        "transport_vlan": transport_vlan,
        "description": description,
    }
    payload = common._filter_kwargs(
        allowed_kwargs=create_params_for_uplink_profies,
        default_dict={},
        **optional_fields,
    )
    # Mandatory fields; NSX-T requires the current _revision for a PUT.
    payload.update(
        display_name=display_name,
        resource_type="UplinkHostSwitchProfile",
        teaming=teaming,
        _revision=revision,
    )
    return nsxt_request.call_api(
        method="put",
        url=profile_url,
        username=username,
        password=password,
        cert_common_name=cert_common_name,
        verify_ssl=verify_ssl,
        cert=cert,
        data=payload,
    )
def delete(
    hostname,
    username,
    password,
    uplink_profile_id,
    verify_ssl=True,
    cert=None,
    cert_common_name=None,
):
    """
    Delete an uplink profile (UplinkHostSwitchProfile).

    CLI Example:

    .. code-block:: bash

        salt vm_minion nsxt_uplink_profiles.delete hostname=nsxt-manager.local username=admin ...

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    uplink_profile_id
        Existing uplink profile id

    verify_ssl
        (Optional) Enable/disable SSL verification. Enabled by default. If set
        to False, certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file used to connect to
        the NSX-T manager.

    cert_common_name
        (Optional) Common name to compare against the certificate when it
        differs from ``hostname`` (e.g. self-signed certificates).
    """
    log.info("Deleting uplink profile with id %s", uplink_profile_id)
    profile_url = "{}/{}".format(UPLINK_PROFILES_BASE_URL.format(hostname), uplink_profile_id)
    response = nsxt_request.call_api(
        method="delete",
        url=profile_url,
        username=username,
        password=password,
        cert_common_name=cert_common_name,
        verify_ssl=verify_ssl,
        cert=cert,
    )
    # A successful DELETE returns an empty body, so substitute a message.
    return response or {"message": "Deleted uplink profile successfully"}
import logging
from saltext.vmware.utils import vmc_constants
from saltext.vmware.utils import vmc_request
from saltext.vmware.utils import vmc_templates
from saltext.vmware.utils import vmc_vcenter_request
log = logging.getLogger(__name__)
__virtualname__ = "vmc_sddc"
def __virtual__():
    """Expose this execution module to Salt under its virtual name."""
    return __virtualname__
def get(
    hostname,
    refresh_key,
    authorization_host,
    org_id,
    include_deleted=False,
    verify_ssl=True,
    cert=None,
):
    """
    Retrieve the list of SDDCs for the given organization.

    Please refer the `VMC List All SDDCs documentation <https://developer.vmware.com/docs/vmc/latest/vmc/api/orgs/org/sddcs/get/>`_ to get insight of functionality and input parameters

    CLI Example:

    .. code-block:: bash

        salt minion-key-id vmc_sddc.get hostname=vmc.vmware.com ...

    hostname
        The host name of VMC

    refresh_key
        API Token of the user which is used to get the Access Token required for VMC operations

    authorization_host
        Hostname of the Cloud Services Platform (CSP)

    org_id
        The Id of the organization from which SDDCs are retrieved

    include_deleted: Boolean
        (Optional) When true, the result also includes deleted SDDCs.

    verify_ssl
        (Optional) Enable/disable SSL verification. Enabled by default. If set
        to False, certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file used to connect to
        the VMC Cloud Console.
    """
    log.info("Retrieving the List of SDDCs for the given organization %s", org_id)
    api_url = "{base_url}vmc/api/orgs/{org_id}/sddcs".format(
        base_url=vmc_request.set_base_url(hostname), org_id=org_id
    )
    # The VMC API expects the camelCase query parameter "includeDeleted".
    query = vmc_request._filter_kwargs(
        allowed_kwargs=["includeDeleted"],
        includeDeleted=include_deleted,
    )
    return vmc_request.call_api(
        method=vmc_constants.GET_REQUEST_METHOD,
        url=api_url,
        refresh_key=refresh_key,
        authorization_host=authorization_host,
        description="vmc_sddc.get",
        params=query,
        verify_ssl=verify_ssl,
        cert=cert,
    )
def get_by_id(
    hostname, refresh_key, authorization_host, org_id, sddc_id, verify_ssl=True, cert=None
):
    """
    Return the details of a single SDDC, identified by its id.

    Please refer the `VMC Get SDDC documentation <https://developer.vmware.com/docs/vmc/latest/vmc/api/orgs/org/sddcs/sddc/get/>`_ to get insight of functionality and input parameters

    CLI Example:

    .. code-block:: bash

        salt minion-key-id vmc_sddc.get_by_id hostname=vmc.vmware.com sddc_id=sddc_id...

    hostname
        The host name of VMC

    refresh_key
        API Token of the user which is used to get the Access Token required for VMC operations

    authorization_host
        Hostname of the Cloud Services Platform (CSP)

    org_id
        The Id of the organization to which the SDDC belongs

    sddc_id
        Id of the SDDC to retrieve

    verify_ssl
        (Optional) Enable/disable SSL verification. Enabled by default. If set
        to False, certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file used to connect to
        the VMC Cloud Console.
    """
    log.info("Retrieving the SDDC details for the sddc %s in the organization %s", sddc_id, org_id)
    api_url = "{base_url}vmc/api/orgs/{org_id}/sddcs/{sddc_id}".format(
        base_url=vmc_request.set_base_url(hostname), org_id=org_id, sddc_id=sddc_id
    )
    return vmc_request.call_api(
        method=vmc_constants.GET_REQUEST_METHOD,
        url=api_url,
        refresh_key=refresh_key,
        authorization_host=authorization_host,
        description="vmc_sddc.get_by_id",
        verify_ssl=verify_ssl,
        cert=cert,
    )
def create(
    hostname,
    refresh_key,
    authorization_host,
    org_id,
    sddc_name,
    num_hosts,
    provider,
    region,
    account_link_config=None,
    account_link_sddc_config=None,
    deployment_type=None,
    host_instance_type=None,
    msft_license_config=None,
    sddc_id=None,
    sddc_template_id=None,
    sddc_type=None,
    size=None,
    skip_creating_vxlan=False,
    sso_domain=None,
    storage_capacity=None,
    vpc_cidr=None,
    vxlan_subnet=None,
    validate_only=False,
    verify_ssl=True,
    cert=None,
):
    """
    Create an SDDC in the given organization.

    Please refer the `VMC Create A New SDDC documentation <https://developer.vmware.com/docs/vmc/latest/vmc/api/orgs/org/sddcs/post/>`_ to get insight of functionality and input parameters

    CLI Example:

    .. code-block:: bash

        salt minion-key-id vmc_sddc.create hostname=vmc.vmware.com ...

    hostname
        The host name of VMC

    refresh_key
        API Token of the user which is used to get the Access Token required for VMC operations

    authorization_host
        Hostname of the Cloud Services Platform (CSP)

    org_id
        The Id of the organization to which the SDDC belongs

    sddc_name: String
        (Required) The name assigned to the newly created SDDC

    num_hosts: Integer As Int32
        (Required) Number of hosts in the SDDC

    provider: String
        (Required) Determines what additional properties are available based on
        cloud provider. Possible values: AWS, ZEROCLOUD

    region: String
        (Required) AWS region where the SDDC will be deployed

    account_link_config
        (Optional) The account linking configuration; a dict with the key
        ``delay_account_link`` (Boolean) indicating whether account linking
        should be delayed for the SDDC, e.g.

        .. code::

            {
                "delay_account_link": false
            }

    account_link_sddc_config : List Of AccountLinkSddcConfig
        (Optional) SDDC linking configurations to use. Each entry may contain
        ``connected_account_id`` (String, the customer connected account to
        work with) and ``customer_subnet_ids`` (Array of String).

    deployment_type: String
        (Optional) SingleAZ or MultiAZ SDDC. Default is SingleAZ.

    host_instance_type: String
        (Optional) Instance type for the esx hosts in the primary cluster.
        Possible values: i3.metal, r5.metal, i3en.metal

    msft_license_config : MsftLicensingConfig
        (Optional) Desired licensing support, if any, of Microsoft software.

    sddc_id: String As UUID
        (Optional) If provided, assigned as the SDDC id of the provisioned SDDC.

    sddc_template_id : String As UUID
        (Optional) If provided, configuration from the template is applied to
        the provisioned SDDC.

    sddc_type: String
        (Optional) Denotes the sddc type; if null or empty the type is
        considered default.

    size: String
        (Optional) Size of the vCenter and NSX appliances. "large" corresponds
        to a large vCenter appliance and large NSX appliance; "medium" to
        medium appliances. Defaults to "medium".
        Possible values: nsx_small, medium, large, nsx_large

    skip_creating_vxlan : Boolean
        (Optional) Skip creating vxlan for compute gateway during SDDC
        provisioning.

    sso_domain : String
        (Optional) SSO domain name for vSphere users. Defaults to vmc.local.

    storage_capacity: Integer As Int64
        (Optional) Storage capacity requested for the primary cluster, in GiBs.
        If provided, separable storage of this capacity is used instead of
        direct-attached storage.

    vpc_cidr
        (Optional) AWS VPC IP range. Only prefix of 16 or 20 is currently
        supported.

    vxlan_subnet : String
        (Optional) VXLAN IP subnet in CIDR for compute gateway.

    validate_only: Boolean
        (Optional) When true, only validates the given SDDC configuration
        without provisioning.

    verify_ssl
        (Optional) Enable/disable SSL verification. Enabled by default. If set
        to False, certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file used to connect to
        the VMC Cloud Console.
    """
    log.info("Creating a new SDDC %s in the organization %s", sddc_name, org_id)
    api_url = "{base_url}vmc/api/orgs/{org_id}/sddcs".format(
        base_url=vmc_request.set_base_url(hostname), org_id=org_id
    )
    # Full candidate request body; _filter_kwargs drops unset optional fields.
    sddc_config = {
        "name": sddc_name,
        "num_hosts": num_hosts,
        "provider": provider,
        "region": region,
        "account_link_config": account_link_config,
        "account_link_sddc_config": account_link_sddc_config,
        "deployment_type": deployment_type,
        "host_instance_type": host_instance_type,
        "msft_license_config": msft_license_config,
        "sddc_id": sddc_id,
        "sddc_template_id": sddc_template_id,
        "sddc_type": sddc_type,
        "size": size,
        "skip_creating_vxlan": skip_creating_vxlan,
        "sso_domain": sso_domain,
        "storage_capacity": storage_capacity,
        "vpc_cidr": vpc_cidr,
        "vxlan_subnet": vxlan_subnet,
    }
    payload_fields = vmc_request._filter_kwargs(
        allowed_kwargs=sddc_config.keys(), **sddc_config
    )
    # The VMC API expects the camelCase query parameter "validateOnly".
    query = vmc_request._filter_kwargs(allowed_kwargs=["validateOnly"], validateOnly=validate_only)
    payload = vmc_request.create_payload_for_request(vmc_templates.create_sddc, payload_fields)
    return vmc_request.call_api(
        method=vmc_constants.POST_REQUEST_METHOD,
        url=api_url,
        refresh_key=refresh_key,
        authorization_host=authorization_host,
        description="vmc_sddc.create",
        data=payload,
        params=query,
        verify_ssl=verify_ssl,
        cert=cert,
    )
def delete(
    hostname,
    refresh_key,
    authorization_host,
    org_id,
    sddc_id,
    force_delete=False,
    retain_configuration=False,
    template_name=None,
    verify_ssl=True,
    cert=None,
):
    """
    Delete the given SDDC.

    Please refer the `VMC Delete SDDC documentation <https://developer.vmware.com/docs/vmc/latest/vmc/api/orgs/org/sddcs/sddc/delete/>`_ to get insight of functionality and input parameters

    CLI Example:

    .. code-block:: bash

        salt minion-key-id vmc_sddc.delete hostname=vmc.vmware.com sddc_id=sddc_id ...

    hostname
        The host name of VMC

    refresh_key
        API Token of the user which is used to get the Access Token required for VMC operations

    authorization_host
        Hostname of the Cloud Services Platform (CSP)

    org_id
        The Id of the organization to which the SDDC belongs

    sddc_id
        Id of the SDDC to delete

    force_delete: Boolean
        (Optional) If true, will delete forcefully. Beware: do not use the
        force flag if there is a chance an active provisioning or deleting task
        is running against this SDDC. This option is restricted.

    retain_configuration: Boolean
        (Optional) If true, the SDDC's configuration is retained as a template
        for later use. Applicable only to SDDCs in ACTIVE state.

    template_name: String
        (Optional) Only applicable when retain_configuration is also true; used
        as the name of the generated SDDC configuration template.

    verify_ssl
        (Optional) Enable/disable SSL verification. Enabled by default. If set
        to False, certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file used to connect to
        the VMC Cloud Console.
    """
    log.info("Deleting the given SDDC %s", sddc_id)
    api_url = "{base_url}vmc/api/orgs/{org_id}/sddcs/{sddc_id}".format(
        base_url=vmc_request.set_base_url(hostname), org_id=org_id, sddc_id=sddc_id
    )
    query = vmc_request._filter_kwargs(
        allowed_kwargs=["force", "retain_configuration", "template_name"],
        force=force_delete,
        retain_configuration=retain_configuration,
        template_name=template_name,
    )
    return vmc_request.call_api(
        method=vmc_constants.DELETE_REQUEST_METHOD,
        url=api_url,
        refresh_key=refresh_key,
        authorization_host=authorization_host,
        description="vmc_sddc.delete",
        params=query,
        verify_ssl=verify_ssl,
        cert=cert,
    )
def update_name(
    hostname,
    refresh_key,
    authorization_host,
    org_id,
    sddc_id,
    sddc_new_name,
    verify_ssl=True,
    cert=None,
):
    """
    Rename the given SDDC.

    Please refer the `VMC Patch SDDC documentation <https://developer.vmware.com/docs/vmc/latest/vmc/api/orgs/org/sddcs/sddc/patch/>`_ to get insight of functionality and input parameters

    CLI Example:

    .. code-block:: bash

        salt minion-key-id vmc_sddc.update_name hostname=vmc.vmware.com sddc_id ...

    hostname
        The host name of VMC

    refresh_key
        API Token of the user which is used to get the Access Token required for VMC operations

    authorization_host
        Hostname of the Cloud Services Platform (CSP)

    org_id
        The Id of the organization to which the SDDC belongs

    sddc_id
        Id of the SDDC whose name will be updated

    sddc_new_name
        The new name of the SDDC

    verify_ssl
        (Optional) Enable/disable SSL verification. Enabled by default. If set
        to False, certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file used to connect to
        the VMC Cloud Console.
    """
    log.info("Updating sddc name for the SDDC %s", sddc_id)
    api_url = "{base_url}vmc/api/orgs/{org_id}/sddcs/{sddc_id}".format(
        base_url=vmc_request.set_base_url(hostname), org_id=org_id, sddc_id=sddc_id
    )
    # Only the name field is patched.
    payload = {"name": sddc_new_name}
    return vmc_request.call_api(
        method=vmc_constants.PATCH_REQUEST_METHOD,
        url=api_url,
        refresh_key=refresh_key,
        authorization_host=authorization_host,
        description="vmc_sddc.update_name",
        data=payload,
        verify_ssl=verify_ssl,
        cert=cert,
    )
def get_vcenter_detail(
    hostname, refresh_key, authorization_host, org_id, sddc_id, verify_ssl=True, cert=None
):
    """
    Retrieve the vCenter connection details (URL and cloud credentials) for
    the given SDDC.

    Please refer the `VMC Get SDDC documentation <https://developer.vmware.com/docs/vmc/latest/vmc/api/orgs/org/sddcs/sddc/get/>`_ to get insight of functionality and input parameters

    CLI Example:

    .. code-block:: bash

        salt minion-key-id vmc_sddc.get_vcenter_detail hostname=vmc.vmware.com sddc_id ...

    hostname
        The host name of VMC

    refresh_key
        API Token of the user which is used to get the Access Token required for VMC operations

    authorization_host
        Hostname of the Cloud Services Platform (CSP)

    org_id
        The Id of the organization to which the SDDC belongs

    sddc_id
        Id of the SDDC from which the vCenter details are retrieved

    verify_ssl
        (Optional) Enable/disable SSL verification. Enabled by default. If set
        to False, certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file used to connect to
        the VMC Cloud Console.
    """
    log.info("Retrieving the vCenter detail for the given SDDC %s", sddc_id)
    sddc = get_by_id(hostname, refresh_key, authorization_host, org_id, sddc_id, verify_ssl, cert)
    # Propagate API errors unchanged.
    if "error" in sddc:
        return sddc
    resource_config = sddc["resource_config"]
    return {
        "description": "vmc_sddc.get_vcenter_detail",
        "vcenter_detail": {
            "vcenter_url": resource_config["vc_url"],
            "username": resource_config["cloud_username"],
            "password": resource_config["cloud_password"],
        },
    }
def get_vms(
    hostname,
    username,
    password,
    clusters=None,
    datacenters=None,
    folders=None,
    hosts=None,
    names=None,
    power_states=None,
    resource_pools=None,
    vms=None,
    verify_ssl=True,
    cert=None,
):
    """
    Retrieve virtual machines from the SDDC vCenter, optionally filtered.

    Please refer the `vCenter Get VM List documentation <https://developer.vmware.com/apis/vsphere-automation/latest/vcenter/api/vcenter/vm/get/>`_ to get insight of functionality and input parameters

    CLI Example:

    .. code-block:: bash

        salt <minion-key-id> vmc_sddc.get_vms hostname=vmc.vmware.com ...

    hostname
        The host name of the vCenter console

    username
        Username required to login to the vCenter console

    password
        Password required to login to the vCenter console

    clusters: Array Of String
        (Optional) Clusters that must contain the virtual machine for it to
        match the filter. If unset or empty, VMs in any cluster match.
        Identifiers are for the resource type: ClusterComputeResource.

    datacenters: Array Of String
        (Optional) Datacenters that must contain the virtual machine for it to
        match the filter. If unset or empty, VMs in any datacenter match.
        Identifiers are for the resource type: Datacenter.

    folders: Array Of String
        (Optional) Folders that must contain the virtual machine for it to
        match the filter. If unset or empty, VMs in any folder match.
        Identifiers are for the resource type: Folder.

    hosts: Array Of String
        (Optional) Hosts that must contain the virtual machine for it to match
        the filter. If unset or empty, VMs on any host match. Identifiers are
        for the resource type: HostSystem.

    names: Array Of String
        (Optional) Names that virtual machines must have to match the filter
        (see VM.Info.name). If unset or empty, VMs with any name match.

    power_states: Array Of VmPowerState
        (Optional) Power states a virtual machine must be in to match the
        filter. Valid states: POWERED_OFF, POWERED_ON, SUSPENDED. If unset or
        empty, VMs in any power state match.

    resource_pools: Array Of String
        (Optional) Resource pools that must contain the virtual machine for it
        to match the filter. If unset or empty, VMs in any resource pool match.
        Identifiers are for the resource type: ResourcePool.

    vms: Array Of String
        (Optional) Identifiers of virtual machines that can match the filter.
        If unset or empty, VMs with any identifier match. Identifiers are for
        the resource type: VirtualMachine.

    verify_ssl
        (Optional) Enable/disable SSL verification. Enabled by default. If set
        to False, certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file used to connect to
        the VMC Cloud Console.
    """
    log.info("Retrieving the virtual machines from the SDDC vCenter")
    api_url = "{base_url}api/vcenter/vm".format(base_url=vmc_request.set_base_url(hostname))
    # Candidate query filters; _filter_kwargs drops the unset ones.
    vm_filters = {
        "clusters": clusters,
        "datacenters": datacenters,
        "folders": folders,
        "hosts": hosts,
        "names": names,
        "power_states": power_states,
        "resource_pools": resource_pools,
        "vms": vms,
    }
    query = vmc_request._filter_kwargs(allowed_kwargs=vm_filters.keys(), **vm_filters)
    headers = vmc_vcenter_request.get_headers(hostname, username, password)
    return vmc_vcenter_request.call_api(
        method=vmc_constants.GET_REQUEST_METHOD,
        url=api_url,
        headers=headers,
        description="vmc_sddc.get_vms",
        params=query,
        verify_ssl=verify_ssl,
        cert=cert,
    )
def get_vms_by_sddc_id(
    hostname, refresh_key, authorization_host, org_id, sddc_id, verify_ssl=True, cert=None
):
    """
    Retrieves the virtual machines for the given SDDC.

    Please refer the `vCenter Get VM List documentation <https://developer.vmware.com/apis/vsphere-automation/latest/vcenter/api/vcenter/vm/get/>`_ to get insight of functionality and input parameters

    CLI Example:

    .. code-block:: bash

        salt minion-key-id vm_minion vmc_sddc.get_vms_by_sddc_id hostname=vmc.vmware.com sddc_id ...

    hostname
        The host name of VMC

    refresh_key
        API Token of the user which is used to get the Access Token required for VMC operations

    authorization_host
        Hostname of the Cloud Services Platform (CSP)

    org_id
        The Id of organization to which the SDDC belongs to

    sddc_id
        Id of the SDDC for which VMs are retrieved

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file to connect to VMC Cloud Console.
        The certificate can be retrieved from browser.
    """
    from urllib.parse import urlparse

    log.info("Retrieving the virtual machines for the given SDDC %s", sddc_id)
    vcenter_detail_result = get_vcenter_detail(
        hostname, refresh_key, authorization_host, org_id, sddc_id, verify_ssl, cert
    )
    if "error" in vcenter_detail_result:
        return vcenter_detail_result
    vcenter_detail = vcenter_detail_result["vcenter_detail"]
    # Derive the bare vCenter host from its URL, e.g.
    # "https://vcenter.sddc.example.com/" -> "vcenter.sddc.example.com".
    # urlparse is more robust than slicing off a fixed-width scheme prefix
    # and trailing slash.
    vcenter_hostname = urlparse(vcenter_detail["vcenter_url"]).netloc
    return get_vms(
        hostname=vcenter_hostname,
        username=vcenter_detail["username"],
        password=vcenter_detail["password"],
        verify_ssl=verify_ssl,
        cert=cert,
    )
import json
import logging
from saltext.vmware.utils import common
from saltext.vmware.utils import nsxt_request
log = logging.getLogger(__name__)

# Name under which the Salt loader exposes this execution module.
__virtualname__ = "nsxt_compute_manager"

# NSX-T REST endpoint for compute-manager operations; ``management_host`` is
# substituted with the NSX Manager host name.
BASE_URL = "https://{management_host}/api/v1/fabric/compute-managers"


def __virtual__():
    """Expose this module under its virtual name."""
    return __virtualname__
def get(
    hostname,
    username,
    password,
    verify_ssl=True,
    cert=None,
    cert_common_name=None,
    cursor=None,
    included_fields=None,
    origin_type=None,
    page_size=None,
    server=None,
    sort_by=None,
    sort_ascending=None,
):
    """
    Lists compute managers registered to NSX Manager

    CLI Example:

    .. code-block:: bash

        salt vm_minion compute_manager.get hostname=nsxt-manager.local username=admin ...

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL certificate file to connect to NSX-T manager.
        The certificate can be retrieved from browser.

    cert_common_name
        (Optional) By default, the hostname parameter and the common name in certificate is compared for host name
        verification. If the client certificate common name and hostname do not match (in case of self-signed
        certificates), specify the certificate common name as part of this parameter. This value is then used to
        compare against certificate common name.

    cursor
        (Optional) Opaque cursor to be used for getting next page of records (supplied by current result page)

    included_fields
        (Optional) Comma separated list of fields that should be included in query result

    origin_type
        (Optional) Compute manager type like vCenter

    page_size
        (Optional) Maximum number of results to return in this page

    server
        (Optional) IP address or hostname of compute manager

    sort_by
        (Optional) Field by which records are sorted

    sort_ascending
        (Optional) Boolean value to sort result in ascending order
    """
    url = BASE_URL.format(management_host=hostname)
    # Lazy %-style arguments: formatted only when INFO is enabled, and
    # consistent with the logging style used elsewhere in this module.
    log.info("Retrieving compute managers from NSX Manager %s", hostname)
    # Drop parameters the caller did not supply; only the rest become query
    # parameters of the listing request.
    params = common._filter_kwargs(
        allowed_kwargs=(
            "server",
            "cursor",
            "included_fields",
            "origin_type",
            "page_size",
            "sort_by",
            "sort_ascending",
        ),
        server=server,
        cursor=cursor,
        included_fields=included_fields,
        origin_type=origin_type,
        page_size=page_size,
        sort_by=sort_by,
        sort_ascending=sort_ascending,
    )
    return nsxt_request.call_api(
        method="get",
        url=url,
        username=username,
        password=password,
        cert_common_name=cert_common_name,
        verify_ssl=verify_ssl,
        cert=cert,
        params=params,
    )
def get_by_display_name(
    hostname,
    username,
    password,
    display_name,
    verify_ssl=True,
    cert=None,
    cert_common_name=None,
    cursor=None,
    included_fields=None,
    origin_type=None,
    page_size=None,
    server=None,
    sort_by=None,
    sort_ascending=None,
):
    """
    Return the compute managers registered to NSX Manager whose display name
    matches ``display_name``.

    All pages of the listing API are read and the matching records are
    returned under the ``results`` key. On failure the error dictionary
    produced by the request layer is returned unchanged.

    CLI Example:

    .. code-block:: bash

        salt vm_minion compute_manager.get_by_display_name hostname=nsxt-manager.local username=admin ...

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    display_name
        Display-name of the compute manager

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL certificate file to connect to NSX-T manager.

    cert_common_name
        (Optional) Certificate common name compared against instead of the
        hostname (useful for self-signed certificates).

    cursor
        (Optional) Opaque cursor to be used for getting next page of records

    included_fields
        (Optional) Comma separated list of fields that should be included in query result

    origin_type
        (Optional) Compute manager type like vCenter

    page_size
        (Optional) Maximum number of results to return in this page

    server
        (Optional) IP address or hostname of compute manager

    sort_by
        (Optional) Field by which records are sorted

    sort_ascending
        (Optional) Boolean value to sort result in ascending order
    """
    log.info("Finding compute managers with display name: %s", display_name)
    # _read_paginated walks every page returned by get() and keeps only the
    # records whose display name matches.
    matches = common._read_paginated(
        func=get,
        display_name=display_name,
        hostname=hostname,
        username=username,
        password=password,
        verify_ssl=verify_ssl,
        cert=cert,
        cert_common_name=cert_common_name,
        cursor=cursor,
        included_fields=included_fields,
        origin_type=origin_type,
        page_size=page_size,
        server=server,
        sort_by=sort_by,
        sort_ascending=sort_ascending,
    )
    return matches if "error" in matches else {"results": matches}
def register(
    hostname,
    username,
    password,
    compute_manager_server,
    credential,
    server_origin_type="vCenter",
    display_name=None,
    description=None,
    set_as_oidc_provider=None,
    verify_ssl=True,
    cert=None,
    cert_common_name=None,
):
    """
    Registers a compute manager with NSX Manager

    CLI Example:

    .. code-block:: bash

        salt vm_minion compute_manager.register hostname=nsxt-manager.local username=admin ...

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    compute_manager_server
        Compute manager server FQDN or IP

    credential
        An object which contains credential details to validate compute manager

        Sample usage in sls file:

        .. code::

            credential:
                credential_type: "UsernamePasswordLoginCredential"
                username: "user"
                password: "pass"
                thumbprint: "36:XX:XX:XX:XX:XX:XX66"

        credential_type
            Type of credential provided. For now only UsernamePasswordLoginCredential is supported.

        username
            Username of the compute manager

        password
            Password of the compute manager

        thumbprint
            Thumbprint of the compute manager

    server_origin_type
        (Optional) Server origin type of the compute manager. Default is vCenter

    display_name
        (Optional) Display name of the compute manager

    description
        (Optional) description for the compute manager

    set_as_oidc_provider
        (Optional) Specifies whether compute manager has been set as OIDC provider. Default is false
        If the compute manager is VC and need to set set as OIDC provider for NSX then this flag should be set as true.

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL certificate file to connect to NSX-T manager.
        The certificate can be retrieved from browser.

    cert_common_name
        (Optional) By default, the hostname parameter and the common name in certificate is compared for host name verification.
        If the client certificate common name and hostname do not match (in case of self-signed certificates),
        specify the certificate common name as part of this parameter. This value is then used to compare against
        certificate common name.
    """
    url = BASE_URL.format(management_host=hostname)
    log.info("Going to register new compute manager")
    # Mandatory payload fields go into default_dict; optional kwargs are
    # merged in only when the caller supplied them.
    data = common._filter_kwargs(
        allowed_kwargs=("description", "display_name", "set_as_oidc_provider"),
        default_dict={
            "server": compute_manager_server,
            "origin_type": server_origin_type,
            "credential": credential,
        },
        display_name=display_name,
        description=description,
        set_as_oidc_provider=set_as_oidc_provider,
    )
    return nsxt_request.call_api(
        method="post",
        url=url,
        username=username,
        password=password,
        cert_common_name=cert_common_name,
        verify_ssl=verify_ssl,
        cert=cert,
        data=data,
    )
def update(
    hostname,
    username,
    password,
    compute_manager_server,
    compute_manager_id,
    credential,
    compute_manager_revision,
    server_origin_type="vCenter",
    verify_ssl=True,
    cert=None,
    cert_common_name=None,
    display_name=None,
    description=None,
    set_as_oidc_provider=None,
):
    """
    Updates a compute manager registered to NSX Manager.

    CLI Example:

    .. code-block:: bash

        salt vm_minion compute_manager.update hostname=nsxt-manager.local username=admin ...

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    compute_manager_server
        Compute manager server FQDN or IP

    compute_manager_id
        Unique Id of the compute manager provided by NSX-T

    compute_manager_revision
        Latest value of _revision property for compute manager.

    credential
        An object which contains credential details to validate compute manager
        (``credential_type``, ``username``, ``password``, ``thumbprint``).
        For now only UsernamePasswordLoginCredential is supported.

        Sample usage in sls file:

        .. code::

            credential:
                credential_type: "UsernamePasswordLoginCredential"
                username: "user"
                password: "pass"
                thumbprint: "36:XX:XX:XX:XX:XX:XX66"

    server_origin_type
        (Optional) Server origin type of the compute manager. Default is vCenter

    display_name
        (Optional) Display name of the compute manager

    description
        (Optional) description for the compute manager

    set_as_oidc_provider
        (Optional) Specifies whether compute manager has been set as OIDC provider. Default is false.
        Should be true when the compute manager is a VC that must act as the OIDC provider for NSX.

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL certificate file to connect to NSX-T manager.

    cert_common_name
        (Optional) Certificate common name compared against instead of the
        hostname (useful for self-signed certificates).
    """
    url = "{base}/{id}".format(
        base=BASE_URL.format(management_host=hostname), id=compute_manager_id
    )
    log.info("Going to update compute manager")
    # Mandatory payload fields go into default_dict; the optional kwargs are
    # merged in only when the caller supplied them.
    payload = common._filter_kwargs(
        allowed_kwargs=("description", "display_name", "set_as_oidc_provider"),
        default_dict={
            "server": compute_manager_server,
            "origin_type": server_origin_type,
            "credential": credential,
            "id": compute_manager_id,
            "_revision": compute_manager_revision,
        },
        display_name=display_name,
        description=description,
        set_as_oidc_provider=set_as_oidc_provider,
    )
    return nsxt_request.call_api(
        method="put",
        url=url,
        username=username,
        password=password,
        cert_common_name=cert_common_name,
        verify_ssl=verify_ssl,
        cert=cert,
        data=payload,
    )
def remove(
    hostname,
    username,
    password,
    compute_manager_id,
    verify_ssl=True,
    cert=None,
    cert_common_name=None,
):
    """
    De-registers compute manager from NSX-T if exists

    CLI Example:

    .. code-block:: bash

        salt vm_minion compute_manager.remove hostname=nsxt-manager.local username=admin ...

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    compute_manager_id
        NSX-T unique id for the compute manager

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL certificate file to connect to NSX-T manager.
        The certificate can be retrieved from browser.

    cert_common_name
        (Optional) By default, the hostname parameter and the common name in certificate is compared for host name verification.
        If the client certificate common name and hostname do not match (in case of self-signed certificates),
        specify the certificate common name as part of this parameter. This value is then used to compare against
        certificate common name.
    """
    url = (BASE_URL + "/{id}").format(management_host=hostname, id=compute_manager_id)
    log.info("Going to remove compute manager registration")
    result = nsxt_request.call_api(
        method="delete",
        url=url,
        username=username,
        password=password,
        cert_common_name=cert_common_name,
        verify_ssl=verify_ssl,
        cert=cert,
    )
    # A successful DELETE returns no body, so report success explicitly.
    return result or {"message": "Removed compute manager successfully"}
import logging
from saltext.vmware.utils import common
from saltext.vmware.utils import nsxt_request
log = logging.getLogger(__name__)
__virtual_name__ = "nsxt_transport_node_profiles"
def __virtual__():
return __virtual_name__
TRANSPORT_NODE_PROFILE_BASE_URL = "https://{0}/api/v1/transport-node-profiles"
create_params_for_transport_profiles = [
"transport_zone_endpoints",
"description",
"ignore_overridden_hosts",
]
def get(
    hostname,
    username,
    password,
    verify_ssl=True,
    cert=None,
    cert_common_name=None,
    cursor=None,
    included_fields=None,
    page_size=None,
    sort_ascending=None,
    sort_by=None,
):
    """
    Lists NSX-T transport node profiles

    CLI Example:

    .. code-block:: bash

        salt vm_minion nsxt_transport_node_profiles.get hostname=nsxt-manager.local username=admin ...

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL certificate file to connect to NSX-T manager.

    cert_common_name
        (Optional) Certificate common name compared against instead of the
        hostname (useful for self-signed certificates).

    cursor
        (Optional) Opaque cursor to be used for getting next page of records (supplied by current result page)

    included_fields
        (Optional) Comma separated list of fields that should be included in query result

    page_size
        (Optional) Maximum number of results to return in this page

    sort_ascending
        (Optional) Boolean. Specifies sorting order

    sort_by
        (Optional) Field by which records are sorted

    For more information, see `VMware API docs for NSX-T <https://code.vmware.com/apis/1163/nsx-t>`_
    """
    log.info("Fetching NSX-T transport node profiles")
    # Candidate query parameters; _filter_kwargs drops the ones the caller
    # left unset.
    paging = {
        "cursor": cursor,
        "included_fields": included_fields,
        "page_size": page_size,
        "sort_ascending": sort_ascending,
        "sort_by": sort_by,
    }
    query_params = common._filter_kwargs(
        allowed_kwargs=list(paging), default_dict=None, **paging
    )
    return nsxt_request.call_api(
        method="get",
        url=TRANSPORT_NODE_PROFILE_BASE_URL.format(hostname),
        username=username,
        password=password,
        cert_common_name=cert_common_name,
        verify_ssl=verify_ssl,
        cert=cert,
        params=query_params,
    )
def get_by_display_name(
    hostname,
    username,
    password,
    display_name,
    verify_ssl=True,
    cert=None,
    cert_common_name=None,
):
    """
    Return the NSX-T transport node profiles whose display name matches
    ``display_name``.

    All pages of the listing API are read; the matching profiles are
    returned under the ``results`` key. On failure the error dictionary
    produced by the request layer is returned unchanged.

    CLI Example:

    .. code-block:: bash

        salt vm_minion nsxt_transport_node_profiles.get_by_display_name hostname=nsxt-manager.local username=admin ...

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    display_name
        The name of the transport node profile

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file to connect to NSX-T manager.

    cert_common_name
        (Optional) Certificate common name compared against instead of the
        hostname (useful for self-signed certificates).
    """
    log.info("Finding transport node profiles with display name: %s", display_name)
    matches = common._read_paginated(
        func=get,
        display_name=display_name,
        hostname=hostname,
        username=username,
        password=password,
        verify_ssl=verify_ssl,
        cert=cert,
        cert_common_name=cert_common_name,
    )
    return matches if "error" in matches else {"results": matches}
def create(
    hostname,
    username,
    password,
    display_name,
    host_switch_spec,
    verify_ssl=True,
    cert=None,
    cert_common_name=None,
    transport_zone_endpoints=None,
    description=None,
    ignore_overridden_hosts=None,
):
    """
    Creates transport node profile with the data payload provided.

    CLI Example:

    .. code-block:: bash

        salt vm_minion nsxt_transport_node_profiles.create hostname=nsxt-manager.local username=admin ...

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    display_name
        The name of the transport node profile

    host_switch_spec
        Transport node host switch specification. Only standard host switches
        are supported in the transport node profile; the spec's
        ``resource_type`` is always forced to ``StandardHostSwitchSpec``.

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file to connect to NSX-T manager.
        The certificate can be retrieved from browser.

    cert_common_name
        (Optional) By default, the hostname parameter and the common name in certificate is compared for host name
        verification. If the client certificate common name and hostname do not match (in case of self-signed
        certificates), specify the certificate common name as part of this parameter. This value is then used to
        compare against certificate common name

    transport_zone_endpoints
        (Optional) This is deprecated. TransportZoneEndPoints should be specified per host switch at
        StandardHostSwitch through Transport Node or Transport Node Profile configuration. Array of transport zones.

    description
        (Optional) Description of this resource

    ignore_overridden_hosts
        (Optional) Boolean which determines if cluster-level configuration should be applied on overridden hosts. Default: False

    Sample host_switch_spec usage in an sls file:

    .. code-block:: yaml

        host_switch_spec:
          resource_type: StandardHostSwitchSpec
          host_switches:
            - host_switch_name: nvds1
              host_switch_type: NVDS/VDS
              host_switch_mode: STANDARD
              host_switch_profile_ids:
                - key: UplinkHostSwitchProfile
                  value: <Respective nsxt id>
                - key: LldpHostSwitchProfile
                  value: <Respective nsxt id>
              is_migrate_pnics: false
              ip_assignment_spec:
                resource_type: AssignedByDhcp
              transport_zone_endpoints:
                - transport_zone_id: <Respective nsxt id>
    """
    log.info("Creating nsxt transport node profile")
    url = TRANSPORT_NODE_PROFILE_BASE_URL.format(hostname)
    req_data = common._filter_kwargs(
        allowed_kwargs=create_params_for_transport_profiles,
        default_dict={},
        transport_zone_endpoints=transport_zone_endpoints,
        description=description,
        ignore_overridden_hosts=ignore_overridden_hosts,
    )
    req_data["resource_type"] = "TransportNodeProfile"
    req_data["display_name"] = display_name
    # Work on a shallow copy so the caller's host_switch_spec dictionary is
    # not mutated when the resource type is forced below.
    host_switch_spec = dict(host_switch_spec)
    host_switch_spec["resource_type"] = "StandardHostSwitchSpec"
    req_data["host_switch_spec"] = host_switch_spec
    return nsxt_request.call_api(
        method="post",
        url=url,
        username=username,
        password=password,
        cert_common_name=cert_common_name,
        verify_ssl=verify_ssl,
        cert=cert,
        data=req_data,
    )
def update(
    hostname,
    username,
    password,
    display_name,
    host_switch_spec,
    transport_node_profile_id,
    revision,
    verify_ssl=True,
    cert=None,
    cert_common_name=None,
    transport_zone_endpoints=None,
    description=None,
    ignore_overridden_hosts=None,
):
    """
    Updates transport node profile with the data payload provided.

    CLI Example:

    .. code-block:: bash

        salt vm_minion nsxt_transport_node_profiles.update hostname=nsxt-manager.local username=admin ...

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    display_name
        Display name for the transport node profile

    host_switch_spec
        Transport node host switch specification. Only standard host switches
        are supported in the transport node profile; the spec's
        ``resource_type`` is always forced to ``StandardHostSwitchSpec``.

    transport_node_profile_id
        Unique id provided by NSX-T for transport node profile

    revision
        _revision property of the transport node profile provided by NSX-T

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file to connect to NSX-T manager.
        The certificate can be retrieved from browser.

    cert_common_name
        (Optional) By default, the hostname parameter and the common name in certificate is compared for host name
        verification. If the client certificate common name and hostname do not match (in case of self-signed
        certificates), specify the certificate common name as part of this parameter. This value is then used to
        compare against certificate common name

    transport_zone_endpoints
        (Optional) This is deprecated. TransportZoneEndPoints should be specified per host switch at
        StandardHostSwitch through Transport Node or Transport Node Profile configuration. Array of transport zones.

    description
        (Optional) Description of this resource

    ignore_overridden_hosts
        (Optional) Boolean which determines if cluster-level configuration should be applied on overridden hosts. Default: False
    """
    log.info("Updating nsxt transport node profile")
    url = TRANSPORT_NODE_PROFILE_BASE_URL.format(hostname) + "/{}".format(transport_node_profile_id)
    req_data = common._filter_kwargs(
        allowed_kwargs=create_params_for_transport_profiles,
        default_dict={},
        transport_zone_endpoints=transport_zone_endpoints,
        description=description,
        ignore_overridden_hosts=ignore_overridden_hosts,
    )
    req_data["resource_type"] = "TransportNodeProfile"
    req_data["display_name"] = display_name
    # Work on a shallow copy so the caller's host_switch_spec dictionary is
    # not mutated when the resource type is forced below.
    host_switch_spec = dict(host_switch_spec)
    host_switch_spec["resource_type"] = "StandardHostSwitchSpec"
    req_data["host_switch_spec"] = host_switch_spec
    req_data["_revision"] = revision
    return nsxt_request.call_api(
        method="put",
        url=url,
        username=username,
        password=password,
        cert_common_name=cert_common_name,
        verify_ssl=verify_ssl,
        cert=cert,
        data=req_data,
    )
def delete(
    hostname,
    username,
    password,
    transport_node_profile_id,
    verify_ssl=True,
    cert=None,
    cert_common_name=None,
):
    """
    Deletes transport node profile

    CLI Example:

    .. code-block:: bash

        salt vm_minion nsxt_transport_node_profiles.delete hostname=nsxt-manager.local username=admin ...

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    transport_node_profile_id
        Existing transport node profile id

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file to connect to NSX-T manager.
        The certificate can be retrieved from browser.

    cert_common_name
        (Optional) By default, the hostname parameter and the common name in certificate is compared for host name
        verification. If the client certificate common name and hostname do not match (in case of self-signed
        certificates), specify the certificate common name as part of this parameter. This value is then used to
        compare against certificate common name
    """
    log.info("Deleting transport node profile with id %s", transport_node_profile_id)
    url = TRANSPORT_NODE_PROFILE_BASE_URL.format(hostname) + "/{}".format(
        transport_node_profile_id
    )
    result = nsxt_request.call_api(
        method="delete",
        url=url,
        username=username,
        password=password,
        cert_common_name=cert_common_name,
        verify_ssl=verify_ssl,
        cert=cert,
    )
    # A successful DELETE returns no body, so report success explicitly.
    return result or {"message": "Deleted transport node profile successfully"}
import logging
import salt.exceptions
import saltext.vmware.utils.common as utils_common
import saltext.vmware.utils.datacenter as utils_datacenter
from saltext.vmware.utils.connect import get_service_instance
log = logging.getLogger(__name__)
try:
from pyVmomi import vim
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
__virtualname__ = "vmware_folder"


def __virtual__():
    # Gate module loading on the pyVmomi dependency; Salt hides the module
    # (with the given reason) when the import failed.
    if not HAS_PYVMOMI:
        return False, "Unable to import pyVmomi module."
    return __virtualname__
def create(folder_name, dc_name, type, service_instance=None):
    """
    Creates a folder on a given datacenter.

    folder_name
        Name of folder.

    dc_name
        Name of datacenter where folder will be created.

    type
        (vm, host, datastore, network) Type of folder to be created.

    service_instance
        (optional) The Service Instance from which to obtain managed object references.
    """
    if service_instance is None:
        service_instance = get_service_instance(opts=__opts__, pillar=__pillar__)
    dc_ref = utils_datacenter.get_datacenter(service_instance, dc_name)
    # Map the requested folder type to the datacenter root folder that the
    # new child folder is created under. (The previous implementation also
    # performed an unused inventory lookup for the folder name; that dead
    # traversal has been removed.)
    root_attrs = {
        "vm": "vmFolder",
        "host": "hostFolder",
        "datastore": "datastoreFolder",
        "network": "networkFolder",
    }
    if type not in root_attrs:
        raise salt.exceptions.CommandExecutionError("invalid type")
    getattr(dc_ref, root_attrs[type]).CreateFolder(folder_name)
    return {"status": "created"}
def destroy(folder_name, dc_name, type, service_instance=None):
    """
    Destroy a folder on a given datacenter.

    folder_name
        Name of folder.

    dc_name
        Name of datacenter where folder will be Destroyed.

    type
        (vm, host, datastore, network) Type of folder to be destroyed.

    service_instance
        (optional) The Service Instance from which to obtain managed object references.
    """
    if service_instance is None:
        service_instance = get_service_instance(opts=__opts__, pillar=__pillar__)
    dc_ref = utils_datacenter.get_datacenter(service_instance, dc_name)
    # Each folder type is searched under a different root folder of the
    # datacenter.
    root_attrs = {
        "vm": "vmFolder",
        "host": "hostFolder",
        "datastore": "datastoreFolder",
        "network": "networkFolder",
    }
    if type not in root_attrs:
        raise salt.exceptions.CommandExecutionError("invalid type")
    folder_ref = utils_common.get_mor_by_property(
        service_instance, vim.Folder, folder_name, "name", getattr(dc_ref, root_attrs[type])
    )
    folder_ref.Destroy_Task()
    return {"status": "destroyed"}
def rename(folder_name, new_folder_name, dc_name, type, service_instance=None):
    """
    Rename a folder on a given datacenter.

    folder_name
        Name of folder.

    new_folder_name
        Name to rename folder.

    dc_name
        Name of datacenter where folder will be renamed.

    type
        (vm, host, datastore, network) Type of folder to be renamed.

    service_instance
        (optional) The Service Instance from which to obtain managed object references.
    """
    if service_instance is None:
        service_instance = get_service_instance(opts=__opts__, pillar=__pillar__)
    dc_ref = utils_datacenter.get_datacenter(service_instance, dc_name)
    # Each folder type is searched under a different root folder of the
    # datacenter.
    root_attrs = {
        "vm": "vmFolder",
        "host": "hostFolder",
        "datastore": "datastoreFolder",
        "network": "networkFolder",
    }
    if type not in root_attrs:
        raise salt.exceptions.CommandExecutionError("invalid type")
    folder_ref = utils_common.get_mor_by_property(
        service_instance, vim.Folder, folder_name, "name", getattr(dc_ref, root_attrs[type])
    )
    folder_ref.Rename_Task(new_folder_name)
    return {"status": "renamed"}
def move(folder_name, destination_folder_name, dc_name, type, service_instance=None):
    """
    Move a folder on a given datacenter.

    folder_name
        Name of folder.

    destination_folder_name
        Destination folder for named folder.

    dc_name
        Name of datacenter where folder will be moved.

    type
        (vm, host, datastore, network) Type of folder to be moved.

    service_instance
        (optional) The Service Instance from which to obtain managed object references.
    """
    if service_instance is None:
        service_instance = get_service_instance(opts=__opts__, pillar=__pillar__)
    dc_ref = utils_datacenter.get_datacenter(service_instance, dc_name)
    # Map each folder type to the datacenter attribute holding its root folder;
    # both the source and the destination folder live under the same root.
    root_folder_attrs = {
        "vm": "vmFolder",
        "host": "hostFolder",
        "datastore": "datastoreFolder",
        "network": "networkFolder",
    }
    if type not in root_folder_attrs:
        raise salt.exceptions.CommandExecutionError("invalid type")
    root = getattr(dc_ref, root_folder_attrs[type])
    folder = utils_common.get_mor_by_property(
        service_instance, vim.Folder, folder_name, "name", root
    )
    destination = utils_common.get_mor_by_property(
        service_instance, vim.Folder, destination_folder_name, "name", root
    )
    task = destination.MoveIntoFolder_Task([folder])
    utils_common.wait_for_task(task, folder.name, "move folder")
    return {"status": "moved"}
import logging
import saltext.vmware.utils.datastore as utils_datastore
from saltext.vmware.utils.connect import get_service_instance
log = logging.getLogger(__name__)
try:
from pyVmomi import vim
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
__virtualname__ = "vmware_datastore"
def __virtual__():
    """
    Only load this module when pyVmomi imported successfully.
    """
    if HAS_PYVMOMI:
        return __virtualname__
    return False, "Unable to import pyVmomi module."
def maintenance_mode(datastore_name, datacenter_name=None, service_instance=None):
    """
    Put datastore in maintenance mode.

    datastore_name
        Name of datastore.

    datacenter_name
        (optional) Name of datacenter where datastore exists.

    service_instance
        (optional) The Service Instance from which to obtain managed object references.
    """
    if service_instance is None:
        service_instance = get_service_instance(opts=__opts__, pillar=__pillar__)
    # Explicit validation instead of assert: asserts are stripped under "python -O".
    if not isinstance(datastore_name, str):
        raise TypeError("datastore_name must be a string")
    datastores = utils_datastore.get_datastores(
        service_instance, datastore_name=datastore_name, datacenter_name=datacenter_name
    )
    ds = datastores[0] if datastores else None
    # Report failure (rather than crash) when no matching datastore was found.
    if ds is not None and utils_datastore.enter_maintenance_mode(ds):
        return {"maintenanceMode": "inMaintenance"}
    return {"maintenanceMode": "failed to enter maintenance mode"}
def exit_maintenance_mode(datastore_name, datacenter_name=None, service_instance=None):
    """
    Take datastore out of maintenance mode.

    datastore_name
        Name of datastore.

    datacenter_name
        (optional) Name of datacenter where datastore exists.

    service_instance
        (optional) The Service Instance from which to obtain managed object references.
    """
    if service_instance is None:
        service_instance = get_service_instance(opts=__opts__, pillar=__pillar__)
    # Explicit validation instead of assert: asserts are stripped under "python -O".
    if not isinstance(datastore_name, str):
        raise TypeError("datastore_name must be a string")
    datastores = utils_datastore.get_datastores(
        service_instance, datastore_name=datastore_name, datacenter_name=datacenter_name
    )
    ds = datastores[0] if datastores else None
    # Report failure (rather than crash) when no matching datastore was found.
    if ds is not None and utils_datastore.exit_maintenance_mode(ds):
        return {"maintenanceMode": "normal"}
    return {"maintenanceMode": "failed to exit maintenance mode"}
def get(
    datastore_name=None,
    datacenter_name=None,
    cluster_name=None,
    host_name=None,
    service_instance=None,
):
    """
    Return info about datastores.

    datastore_name
        Filter by this datastore name (optional).

    datacenter_name
        Filter by this datacenter name (required when cluster is not specified)

    cluster_name
        Filter by this cluster name (required when datacenter is not specified)

    host_name
        Filter by this ESXi hostname (optional).

    service_instance
        Use this vCenter service connection instance instead of creating a new one. (optional).
    """
    # Lazy %-style args avoid formatting the message when DEBUG is disabled.
    log.debug("Running %s.get", __virtualname__)
    ret = []
    if not service_instance:
        service_instance = get_service_instance(opts=__opts__, pillar=__pillar__)
    datastores = utils_datastore.get_datastores(
        service_instance,
        datastore_name=datastore_name,
        datacenter_name=datacenter_name,
        cluster_name=cluster_name,
        host_name=host_name,
    )
    for datastore in datastores:
        summary = datastore.summary
        info = {
            "accessible": summary.accessible,
            "capacity": summary.capacity,
            "freeSpace": summary.freeSpace,
            "maintenanceMode": summary.maintenanceMode,
            "multipleHostAccess": summary.multipleHostAccess,
            "name": summary.name,
            "type": summary.type,
            "url": summary.url,
            # summary.uncommitted may be unset/None; normalize to 0.
            "uncommitted": summary.uncommitted or 0,
        }
        ret.append(info)
    return ret
import logging
from saltext.vmware.utils import common
from saltext.vmware.utils import nsxt_request
log = logging.getLogger(__name__)
__virtual_name__ = "nsxt_ip_blocks"
IP_BLOCKS_BASE_URL = "https://{}/api/v1/pools/ip-blocks"
def __virtual__():
    """
    Expose this execution module under its virtual name.
    """
    return __virtual_name__
def get(
    hostname,
    username,
    password,
    verify_ssl=True,
    cert=None,
    cert_common_name=None,
    cursor=None,
    included_fields=None,
    page_size=None,
    sort_by=None,
    sort_ascending=None,
):
    """
    Retrieve IP Address blocks from the NSX-T Manager, honouring the supplied
    pagination and sorting query parameters.

    CLI Example:

    .. code-block:: bash

        salt vm_minion nsxt_ip_blocks.get hostname=nsxt-manager.local username=admin ...

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    verify_ssl
        (Optional) Enable/disable SSL verification. Enabled by default; when
        set to False, certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file used to connect to
        NSX-T manager. The certificate can be retrieved from browser.

    cert_common_name
        (Optional) Certificate common name to compare against instead of the
        hostname, for self-signed certificates whose common name and hostname
        do not match.

    cursor
        (Optional) Opaque cursor used to fetch the next page of records
        (supplied by the current result page).

    included_fields
        (Optional) Comma separated list of fields to include in the result.

    page_size
        (Optional) Maximum number of results to return in this page.

    sort_by
        (Optional) Field by which records are sorted.

    sort_ascending
        (Optional) Boolean value to sort the result in ascending order.
    """
    log.info("Fetching IP Address Blocks")
    # Drop any unset query parameters before building the request.
    query_params = common._filter_kwargs(
        allowed_kwargs=["cursor", "included_fields", "page_size", "sort_ascending", "sort_by"],
        default_dict=None,
        cursor=cursor,
        included_fields=included_fields,
        page_size=page_size,
        sort_by=sort_by,
        sort_ascending=sort_ascending,
    )
    return nsxt_request.call_api(
        method="get",
        url=IP_BLOCKS_BASE_URL.format(hostname),
        username=username,
        password=password,
        cert_common_name=cert_common_name,
        verify_ssl=verify_ssl,
        cert=cert,
        params=query_params,
    )
def get_by_display_name(
    hostname, username, password, display_name, verify_ssl=True, cert=None, cert_common_name=None
):
    """
    Fetch the IP Address block(s) present in the NSX-T Manager whose display
    name matches ``display_name``.

    CLI Example:

    .. code-block:: bash

        salt vm_minion nsxt_ip_blocks.get_by_display_name hostname=nsxt-manager.local username=admin ...

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    display_name
        The name of IP Address block to fetch

    verify_ssl
        (Optional) Enable/disable SSL verification. Enabled by default; when
        set to False, certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file used to connect to
        NSX-T manager. The certificate can be retrieved from browser.

    cert_common_name
        (Optional) Certificate common name to compare against instead of the
        hostname, for self-signed certificates whose common name and hostname
        do not match.
    """
    log.info("Finding IP Address Blocks with display name: %s", display_name)
    matches = common._read_paginated(
        func=get,
        display_name=display_name,
        hostname=hostname,
        username=username,
        password=password,
        verify_ssl=verify_ssl,
        cert=cert,
        cert_common_name=cert_common_name,
    )
    # On failure _read_paginated yields an error dict; pass it through as-is.
    if "error" in matches:
        return matches
    return {"results": matches}
def create(
    cidr,
    hostname,
    username,
    password,
    verify_ssl=True,
    cert=None,
    cert_common_name=None,
    display_name=None,
    description=None,
    tags=None,
):
    """
    Create an IP Address block from the given specification.

    CLI Example:

    .. code-block:: bash

        salt vm_minion nsxt_ip_blocks.create hostname=nsxt-manager.local username=admin ...

    cidr
        Represents network address and the prefix length which will be
        associated with a layer-2 broadcast domain

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    verify_ssl
        (Optional) Enable/disable SSL verification. Enabled by default; when
        set to False, certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file used to connect to
        NSX-T manager. The certificate can be retrieved from browser.

    cert_common_name
        (Optional) Certificate common name to compare against instead of the
        hostname, for self-signed certificates whose common name and hostname
        do not match.

    display_name
        (Optional) Name for the new IP Address Block; when omitted, the block
        id is used as the display name.

    description
        (Optional) description for the IP Address Block

    tags
        (Optional) Opaque identifiers meaningful to the API user. Maximum 30 tags can be associated:

        .. code::

            tags='[
                {
                    "tag": "<tag-key-1>"
                    "scope": "<tag-value-1>"
                },
                {
                    "tag": "<tag-key-2>"
                    "scope": "<tag-value-2>"
                }
            ]'
    """
    log.info("Creating IP Address Block")
    # cidr is mandatory; optional fields are included only when supplied.
    payload = common._filter_kwargs(
        allowed_kwargs=["display_name", "description", "tags"],
        default_dict={"cidr": cidr},
        display_name=display_name,
        description=description,
        tags=tags,
    )
    return nsxt_request.call_api(
        method="post",
        url=IP_BLOCKS_BASE_URL.format(hostname),
        username=username,
        password=password,
        cert_common_name=cert_common_name,
        verify_ssl=verify_ssl,
        cert=cert,
        data=payload,
    )
def update(
    ip_block_id,
    cidr,
    display_name,
    revision,
    hostname,
    username,
    password,
    verify_ssl=True,
    cert=None,
    cert_common_name=None,
    description=None,
    tags=None,
):
    """
    Update the IP Address block with the given id. Any field for which no
    value is provided will be set to null.

    CLI Example:

    .. code-block:: bash

        salt vm_minion nsxt_ip_blocks.update hostname=nsxt-manager.local username=admin ...

    ip_block_id
        Id of the existing IP Address block

    cidr
        Represents network address and the prefix length which will be
        associated with a layer-2 broadcast domain

    display_name
        Existing IP Block display name. This is a non updatable field

    revision
        Revision number of IP block to update

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    verify_ssl
        (Optional) Enable/disable SSL verification. Enabled by default; when
        set to False, certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file used to connect to
        NSX-T manager. The certificate can be retrieved from browser.

    cert_common_name
        (Optional) Certificate common name to compare against instead of the
        hostname, for self-signed certificates whose common name and hostname
        do not match.

    description
        (Optional) description for the IP Address Block

    tags
        (Optional) Opaque identifiers meaningful to the API user. Maximum 30 tags can be associated:

        .. code::

            tags='[
                {
                    "tag": "<tag-key-1>"
                    "scope": "<tag-value-1>"
                },
                {
                    "tag": "<tag-key-2>"
                    "scope": "<tag-value-2>"
                }
            ]'
    """
    log.info("Updating IP Address block %s", display_name)
    # PUT requires the id, current _revision and the non-updatable fields to
    # be echoed back; description/tags are included only when supplied.
    payload = common._filter_kwargs(
        allowed_kwargs=["description", "tags"],
        default_dict={
            "id": ip_block_id,
            "_revision": revision,
            "display_name": display_name,
            "cidr": cidr,
        },
        tags=tags,
        description=description,
    )
    return nsxt_request.call_api(
        method="put",
        url=IP_BLOCKS_BASE_URL.format(hostname) + "/{}".format(ip_block_id),
        username=username,
        password=password,
        cert_common_name=cert_common_name,
        verify_ssl=verify_ssl,
        cert=cert,
        data=payload,
    )
def delete(
    ip_block_id, hostname, username, password, verify_ssl=True, cert=None, cert_common_name=None
):
    """
    Delete the IP Address block with the given id.

    CLI Example:

    .. code-block:: bash

        salt vm_minion nsxt_ip_blocks.delete hostname=nsxt-manager.local username=admin ...

    ip_block_id
        Existing IP Block id

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    verify_ssl
        (Optional) Enable/disable SSL verification. Enabled by default; when
        set to False, certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file used to connect to
        NSX-T manager. The certificate can be retrieved from browser.

    cert_common_name
        (Optional) Certificate common name to compare against instead of the
        hostname, for self-signed certificates whose common name and hostname
        do not match.
    """
    log.info("Deleting IP Address Block %s", ip_block_id)
    resp = nsxt_request.call_api(
        method="delete",
        url=IP_BLOCKS_BASE_URL.format(hostname) + "/{}".format(ip_block_id),
        username=username,
        password=password,
        cert_common_name=cert_common_name,
        verify_ssl=verify_ssl,
        cert=cert,
    )
    # A successful DELETE returns an empty body; substitute a friendly message.
    if resp:
        return resp
    return "IP Block deleted successfully"
from salt.utils.schema import ArrayItem
from salt.utils.schema import BooleanItem
from salt.utils.schema import ComplexSchemaItem
from salt.utils.schema import DefinitionsSchema
from salt.utils.schema import IntegerItem
from salt.utils.schema import OneOfItem
from salt.utils.schema import Schema
from salt.utils.schema import StringItem
class VMwareScsiAddressItem(StringItem):
    """
    Schema item of a VMware SCSI Address item
    """

    # Matches addresses like "vmhba0:C0:T0:L0" (adapter:channel:target:LUN).
    pattern = r"vmhba\d+:C\d+:T\d+:L\d+"
class DiskGroupDiskScsiAddressItem(ComplexSchemaItem):
    """
    Schema item of a ESXi host disk group containing disk SCSI addresses
    """

    title = "Diskgroup Disk Scsi Address Item"
    description = "ESXi host diskgroup item containing disk SCSI addresses"
    # Single cache-tier disk of the diskgroup, addressed by SCSI address.
    cache_scsi_addr = VMwareScsiAddressItem(
        title="Cache Disk Scsi Address",
        description="Specifies the SCSI address of the cache disk",
        required=True,
    )
    # One or more capacity-tier disks, each addressed by SCSI address.
    capacity_scsi_addrs = ArrayItem(
        title="Capacity Scsi Addresses",
        description="Array with the SCSI addresses of the capacity disks",
        items=VMwareScsiAddressItem(),
        min_items=1,
    )
class DiskGroupDiskIdItem(ComplexSchemaItem):
    """
    Schema item of a ESXi host disk group containing disk ids
    """

    title = "Diskgroup Disk Id Item"
    description = "ESXi host diskgroup item containing disk ids"
    # Cache-tier disk referenced by its canonical disk id (non-whitespace).
    cache_id = StringItem(
        title="Cache Disk Id",
        description="Specifies the id of the cache disk",
        pattern=r"[^\s]+",
    )
    # One or more capacity-tier disks referenced by disk ids.
    capacity_ids = ArrayItem(
        title="Capacity Disk Ids",
        description="Array with the ids of the capacity disks",
        items=StringItem(pattern=r"[^\s]+"),
        min_items=1,
    )
class DiskGroupsDiskScsiAddressSchema(DefinitionsSchema):
    """
    Schema of ESXi host diskgroups containing disk SCSI addresses
    """

    title = "Diskgroups Disk Scsi Address Schema"
    description = "ESXi host diskgroup schema containing disk SCSI addresses"
    # At least one diskgroup must be supplied.
    diskgroups = ArrayItem(
        title="Diskgroups",
        description="List of diskgroups in an ESXi host",
        min_items=1,
        items=DiskGroupDiskScsiAddressItem(),
        required=True,
    )
    # Whether the member disks should be wiped before creating the diskgroups.
    erase_disks = BooleanItem(title="Erase Diskgroup Disks", required=True)
class DiskGroupsDiskIdSchema(DefinitionsSchema):
    """
    Schema of ESXi host diskgroups containing disk ids
    """

    title = "Diskgroups Disk Id Schema"
    description = "ESXi host diskgroup schema containing disk ids"
    # At least one diskgroup must be supplied.
    diskgroups = ArrayItem(
        title="DiskGroups",
        description="List of disk groups in an ESXi host",
        min_items=1,
        items=DiskGroupDiskIdItem(),
        required=True,
    )
class VmfsDatastoreDiskIdItem(ComplexSchemaItem):
    """
    Schema item of a VMFS datastore referencing a backing disk id
    """

    title = "VMFS Datastore Disk Id Item"
    description = "VMFS datastore item referencing a backing disk id"
    name = StringItem(
        title="Name",
        description="Specifies the name of the VMFS datastore",
        required=True,
    )
    # Canonical id of the physical disk backing the datastore (non-whitespace).
    backing_disk_id = StringItem(
        title="Backing Disk Id",
        description=("Specifies the id of the disk backing the VMFS " "datastore"),
        pattern=r"[^\s]+",
        required=True,
    )
    # Optional VMFS filesystem version; only the listed versions are accepted.
    vmfs_version = IntegerItem(title="VMFS Version", description="VMFS version", enum=[1, 2, 3, 5])
class VmfsDatastoreDiskScsiAddressItem(ComplexSchemaItem):
    """
    Schema item of a VMFS datastore referencing a backing disk SCSI address
    """

    title = "VMFS Datastore Disk Scsi Address Item"
    description = "VMFS datastore item referencing a backing disk SCSI address"
    name = StringItem(
        title="Name",
        description="Specifies the name of the VMFS datastore",
        required=True,
    )
    # SCSI address of the physical disk backing the datastore.
    backing_disk_scsi_addr = VMwareScsiAddressItem(
        title="Backing Disk Scsi Address",
        description=("Specifies the SCSI address of the disk backing the VMFS " "datastore"),
        required=True,
    )
    # Optional VMFS filesystem version; only the listed versions are accepted.
    vmfs_version = IntegerItem(title="VMFS Version", description="VMFS version", enum=[1, 2, 3, 5])
class VmfsDatastoreSchema(DefinitionsSchema):
    """
    Schema of a VMFS datastore
    """

    title = "VMFS Datastore Schema"
    description = "Schema of a VMFS datastore"
    # The datastore may be described by either a SCSI address or a disk id.
    datastore = OneOfItem(
        items=[VmfsDatastoreDiskScsiAddressItem(), VmfsDatastoreDiskIdItem()],
        required=True,
    )
class HostCacheSchema(DefinitionsSchema):
    """
    Schema of ESXi host cache
    """

    title = "Host Cache Schema"
    description = "Schema of the ESXi host cache"
    enabled = BooleanItem(title="Enabled", required=True)
    # Backing VMFS datastore for the host cache, identified by SCSI address.
    datastore = VmfsDatastoreDiskScsiAddressItem(required=True)
    # NOTE(review): the title says "GB or %" but the pattern accepts "<n>GiB"
    # or a 0-100 percentage — confirm which unit label is intended.
    swap_size = StringItem(
        title="Host cache swap size (in GB or %)",
        pattern=r"(\d+GiB)|(([0-9]|([1-9][0-9])|100)%)",
        required=True,
    )
    # NOTE(review): title "Erase Backup Disk" likely means "Erase Backing Disk".
    erase_backing_disk = BooleanItem(title="Erase Backup Disk", required=True)
class SimpleHostCacheSchema(Schema):
    """
    Simplified Schema of ESXi host cache
    """

    title = "Simple Host Cache Schema"
    description = "Simplified schema of the ESXi host cache"
    enabled = BooleanItem(title="Enabled", required=True)
    # Datastore referenced by name rather than a full datastore item.
    datastore_name = StringItem(title="Datastore Name", required=True)
    # Swap size given directly in MiB (positive integer).
    swap_size_MiB = IntegerItem(title="Host cache swap size in MiB", minimum=1)
class EsxiProxySchema(Schema):
    """
    Schema of the esxi proxy input
    """

    title = "Esxi Proxy Schema"
    description = "Esxi proxy schema"
    # Reject any keys not declared below.
    additional_properties = False
    proxytype = StringItem(required=True, enum=["esxi"])
    host = StringItem(pattern=r"[^\s]+")  # Used when connecting directly
    vcenter = StringItem(pattern=r"[^\s]+")  # Used when connecting via a vCenter
    esxi_host = StringItem()
    username = StringItem()
    # Multiple candidate passwords are tried in order by the proxy.
    passwords = ArrayItem(min_items=1, items=StringItem(), unique_items=True)
    mechanism = StringItem(enum=["userpass", "sspi"])
    verify_ssl = BooleanItem()
    # TODO Should be changed when anyOf is supported for schemas # pylint: disable=W0511
    domain = StringItem()
    principal = StringItem()
    protocol = StringItem()
    port = IntegerItem(minimum=1)
from salt.utils.schema import AnyOfItem
from salt.utils.schema import ArrayItem
from salt.utils.schema import BooleanItem
from salt.utils.schema import ComplexSchemaItem
from salt.utils.schema import DefinitionsSchema
from salt.utils.schema import DictItem
from salt.utils.schema import IntegerItem
from salt.utils.schema import Schema
from salt.utils.schema import StringItem
class OptionValueItem(ComplexSchemaItem):
    """Schema item of the OptionValue"""

    title = "OptionValue"
    key = StringItem(title="Key", required=True)
    # The value may be a string, boolean or integer.
    value = AnyOfItem(items=[StringItem(), BooleanItem(), IntegerItem()])
class AdmissionControlPolicyItem(ComplexSchemaItem):
    """
    Schema item of the HA admission control policy
    """

    title = "Admission Control Policy"
    # Failover capacity reserved as a percentage (0-100) of cluster resources.
    cpu_failover_percent = IntegerItem(title="CPU Failover Percent", minimum=0, maximum=100)
    memory_failover_percent = IntegerItem(title="Memory Failover Percent", minimum=0, maximum=100)
class DefaultVmSettingsItem(ComplexSchemaItem):
    """
    Schema item of the HA default vm settings
    """

    title = "Default VM Settings"
    # What HA does with a VM when its host becomes isolated from the cluster.
    isolation_response = StringItem(
        title="Isolation Response",
        enum=["clusterIsolationResponse", "none", "powerOff", "shutdown"],
    )
    # Restart priority applied to VMs after a host failure.
    restart_priority = StringItem(
        title="Restart Priority",
        enum=["clusterRestartPriority", "disabled", "high", "low", "medium"],
    )
class HAConfigItem(ComplexSchemaItem):
    """
    Schema item of ESX cluster high availability
    """

    title = "HA Configuration"
    description = "ESX cluster HA configuration json schema item"
    enabled = BooleanItem(title="Enabled", description="Specifies if HA should be enabled")
    admission_control_enabled = BooleanItem(title="Admission Control Enabled")
    admission_control_policy = AdmissionControlPolicyItem()
    default_vm_settings = DefaultVmSettingsItem()
    hb_ds_candidate_policy = StringItem(
        title="Heartbeat Datastore Candidate Policy",
        enum=["allFeasibleDs", "allFeasibleDsWithUserPreference", "userSelectedDs"],
    )
    # NOTE(review): host_monitoring and vm_monitoring use ``choices=`` while
    # sibling items use ``enum=`` — confirm salt.utils.schema honors
    # ``choices``; if not, these value lists are not actually enforced.
    host_monitoring = StringItem(title="Host Monitoring", choices=["enabled", "disabled"])
    # Arbitrary advanced HA options as key/value pairs.
    options = ArrayItem(min_items=1, items=OptionValueItem())
    vm_monitoring = StringItem(
        title="Vm Monitoring",
        choices=["vmMonitoringDisabled", "vmAndAppMonitoring", "vmMonitoringOnly"],
    )
class VSANClusterConfigItem(ComplexSchemaItem):
    """
    Schema item of the ESX cluster vSAN configuration
    """

    title = "vSAN Configuration"
    description = "ESX cluster vSAN configuration item"
    enabled = BooleanItem(title="Enabled", description="Specifies if vSAN should be enabled")
    auto_claim_storage = BooleanItem(
        title="Auto Claim Storage",
        description="Specifies whether the storage of member ESXi hosts should "
        "be automatically claimed for vSAN",
    )
    # NOTE(review): the title "Enabled" is reused for both dedup and
    # compression items; consider distinct titles if these surface in
    # validation messages.
    dedup_enabled = BooleanItem(title="Enabled", description="Specifies dedup should be enabled")
    compression_enabled = BooleanItem(
        title="Enabled", description="Specifies if compression should be enabled"
    )
class DRSConfigItem(ComplexSchemaItem):
    """
    Schema item of the ESX cluster DRS configuration
    """

    title = "DRS Configuration"
    description = "ESX cluster DRS configuration item"
    enabled = BooleanItem(title="Enabled", description="Specifies if DRS should be enabled")
    vmotion_rate = IntegerItem(
        title="vMotion rate",
        description="Aggressiveness to do automatic vMotions: "
        "1 (least aggressive) - 5 (most aggressive)",
        minimum=1,
        maximum=5,
    )
    default_vm_behavior = StringItem(
        title="Default VM DRS Behavior",
        description="Specifies the default VM DRS behavior",
        enum=["fullyAutomated", "partiallyAutomated", "manual"],
    )
class ESXClusterConfigSchema(DefinitionsSchema):
    """
    Schema of the ESX cluster config
    """

    title = "ESX Cluster Configuration Schema"
    description = "ESX cluster configuration schema"
    # Aggregates the HA, vSAN and DRS sub-schemas defined above.
    ha_item = HAConfigItem()
    vsan = VSANClusterConfigItem()
    drs = DRSConfigItem()
    vm_swap_placement = StringItem(title="VM Swap Placement")
class ESXClusterEntitySchema(Schema):
    """Schema of the ESX cluster entity"""

    title = "ESX Cluster Entity Schema"
    description = "ESX cluster entity schema"
    # Only the "cluster" entity type is accepted by this schema.
    type = StringItem(
        title="Type",
        description="Specifies the entity type",
        required=True,
        enum=["cluster"],
    )
    datacenter = StringItem(
        title="Datacenter",
        description="Specifies the cluster datacenter",
        required=True,
        pattern=r"\w+",
    )
    cluster = StringItem(
        title="Cluster",
        description="Specifies the cluster name",
        required=True,
        pattern=r"\w+",
    )
class LicenseSchema(Schema):
    """
    Schema of the license name to key mapping
    """

    title = "Licenses schema"
    description = "License configuration schema"
    # Mapping of license name -> 25-character license key (5 groups of 5).
    licenses = DictItem(
        title="Licenses",
        description="Dictionary containing the license name to key mapping",
        required=True,
        additional_properties=StringItem(
            title="License Key",
            description="Specifies the license key",
            pattern=r"^(\w{5}-\w{5}-\w{5}-\w{5}-\w{5})$",
        ),
    )
class EsxclusterProxySchema(Schema):
    """
    Schema of the esxcluster proxy input
    """

    title = "Esxcluster Proxy Schema"
    description = "Esxcluster proxy schema"
    # Reject any keys not declared below.
    additional_properties = False
    proxytype = StringItem(required=True, enum=["esxcluster"])
    vcenter = StringItem(required=True, pattern=r"[^\s]+")
    datacenter = StringItem(required=True)
    cluster = StringItem(required=True)
    mechanism = StringItem(required=True, enum=["userpass", "sspi"])
    username = StringItem()
    # Multiple candidate passwords are tried in order by the proxy.
    passwords = ArrayItem(min_items=1, items=StringItem(), unique_items=True)
    # TODO Should be changed when anyOf is supported for schemas # pylint: disable=W0511
    domain = StringItem()
    principal = StringItem()
    protocol = StringItem()
    port = IntegerItem(minimum=1)
from salt.utils.schema import AnyOfItem
from salt.utils.schema import ArrayItem
from salt.utils.schema import BooleanItem
from salt.utils.schema import ComplexSchemaItem
from salt.utils.schema import DefinitionsSchema
from salt.utils.schema import IntegerItem
from salt.utils.schema import IPv4Item
from salt.utils.schema import NullItem
from salt.utils.schema import NumberItem
from salt.utils.schema import StringItem
class ESXVirtualMachineSerialBackingItem(ComplexSchemaItem):
    """
    Configuration Schema Item for ESX Virtual Machine Serial Port Backing
    """

    title = "ESX Virtual Machine Serial Port Backing"
    description = "ESX virtual machine serial port backing"
    required = True
    # URI of the backing endpoint (e.g. network/pipe/file target).
    uri = StringItem()
    # Whether this end of the serial connection acts as client or server.
    direction = StringItem(enum=("client", "server"))
    filename = StringItem()
class ESXVirtualMachineDeviceConnectionItem(ComplexSchemaItem):
    """
    Configuration Schema Item for ESX Virtual Machine Serial Port Connection
    """

    title = "ESX Virtual Machine Serial Port Connection"
    description = "ESX virtual machine serial port connection"
    required = True
    # Whether the guest OS may connect/disconnect the device itself.
    allow_guest_control = BooleanItem(default=True)
    # Whether the device is connected when the VM powers on.
    start_connected = BooleanItem(default=True)
class ESXVirtualMachinePlacementSchemaItem(ComplexSchemaItem):
    """
    Configuration Schema Item for ESX Virtual Machine Placement
    """

    title = "ESX Virtual Machine Placement Information"
    description = "ESX virtual machine placement property"
    required = True
    # Per the descriptions below, cluster/host/resourcepool each apply when
    # the VM is placed to that kind of target.
    cluster = StringItem(
        title="Virtual Machine Cluster",
        description="Cluster of the virtual machine if it is placed to a cluster",
    )
    host = StringItem(
        title="Virtual Machine Host",
        description="Host of the virtual machine if it is placed to a standalone host",
    )
    resourcepool = StringItem(
        title="Virtual Machine Resource Pool",
        description="Resource pool of the virtual machine if it is placed to a resource pool",
    )
    folder = StringItem(
        title="Virtual Machine Folder",
        description="Folder of the virtual machine where it should be deployed, default is the datacenter vmFolder",
    )
class ESXVirtualMachineCdDriveClientSchemaItem(ComplexSchemaItem):
    """
    Configuration Schema Item for ESX Virtual Machine CD Drive Client
    """

    title = "ESX Virtual Machine Serial CD Client"
    description = "ESX virtual machine CD/DVD drive client properties"
    # Client-device emulation mode of the CD/DVD drive.
    mode = StringItem(required=True, enum=("passthrough", "atapi"))
class ESXVirtualMachineCdDriveIsoSchemaItem(ComplexSchemaItem):
    """
    Configuration Schema Item for ESX Virtual Machine CD Drive ISO
    """

    title = "ESX Virtual Machine Serial CD ISO"
    description = "ESX virtual machine CD/DVD drive ISO properties"
    # Datastore path of the ISO image backing the drive.
    path = StringItem(required=True)
class ESXVirtualMachineCdDriveSchemaItem(ComplexSchemaItem):
    """
    Configuration Schema Item for ESX Virtual Machine CD Drives
    """

    title = "ESX Virtual Machine Serial CD"
    description = "ESX virtual machine CD/DVD drive properties"
    adapter = StringItem(
        title="Virtual Machine CD/DVD Adapter",
        description="Unique adapter name for virtual machine cd/dvd drive",
        required=True,
    )
    controller = StringItem(required=True)
    # Selects which backing sub-item applies: "datastore_iso_file" pairs with
    # the datastore_iso_file item, "client_device" with client_device.
    device_type = StringItem(
        title="Virtual Machine Device Type",
        description="CD/DVD drive of the virtual machine if it is placed to a cluster",
        required=True,
        default="client_device",
        enum=("datastore_iso_file", "client_device"),
    )
    client_device = ESXVirtualMachineCdDriveClientSchemaItem()
    datastore_iso_file = ESXVirtualMachineCdDriveIsoSchemaItem()
    connectable = ESXVirtualMachineDeviceConnectionItem()
class ESXVirtualMachineSerialSchemaItem(ComplexSchemaItem):
    """
    Configuration Schema Item for ESX Virtual Machine Serial Port
    """

    title = "ESX Virtual Machine Serial Port Configuration"
    description = "ESX virtual machine serial port properties"
    # Kind of backing the serial port uses.
    type = StringItem(
        title="Virtual Machine Serial Port Type",
        required=True,
        enum=("network", "pipe", "file", "device"),
    )
    adapter = StringItem(
        title="Virtual Machine Serial Port Name",
        description="Unique adapter name for virtual machine serial port"
        "for creation an arbitrary value should be specified",
        required=True,
    )
    backing = ESXVirtualMachineSerialBackingItem()
    connectable = ESXVirtualMachineDeviceConnectionItem()
    yield_port = BooleanItem(
        title="Serial Port Yield", description="Serial port yield", default=False
    )
class ESXVirtualMachineScsiSchemaItem(ComplexSchemaItem):
    """
    Configuration Schema Item for ESX Virtual Machine SCSI Controller
    """

    title = "ESX Virtual Machine SCSI Controller Configuration"
    description = "ESX virtual machine scsi controller properties"
    required = True
    adapter = StringItem(
        title="Virtual Machine SCSI Controller Name",
        description="Unique SCSI controller name"
        "for creation an arbitrary value should be specified",
        required=True,
    )
    type = StringItem(
        title="Virtual Machine SCSI type",
        description="Type of the SCSI controller",
        required=True,
        enum=("lsilogic", "lsilogic_sas", "paravirtual", "buslogic"),
    )
    bus_sharing = StringItem(
        title="Virtual Machine SCSI bus sharing",
        description="Sharing type of the SCSI bus",
        required=True,
        enum=("virtual_sharing", "physical_sharing", "no_sharing"),
    )
    bus_number = NumberItem(
        title="Virtual Machine SCSI bus number",
        description="Unique bus number of the SCSI device",
        required=True,
    )
class ESXVirtualMachineSataSchemaItem(ComplexSchemaItem):
    """
    Configuration Schema Item for ESX Virtual Machine SATA Controller
    """

    title = "ESX Virtual Machine SATA Controller Configuration"
    description = "ESX virtual machine SATA controller properties"
    # Unlike the SCSI controller item, a SATA controller is optional.
    required = False
    adapter = StringItem(
        title="Virtual Machine SATA Controller Name",
        description="Unique SATA controller name"
        "for creation an arbitrary value should be specified",
        required=True,
    )
    bus_number = NumberItem(
        title="Virtual Machine SATA bus number",
        description="Unique bus number of the SATA device",
        required=True,
    )
class ESXVirtualMachineDiskSchemaItem(ComplexSchemaItem):
    """
    Configuration Schema Item for ESX Virtual Machine Disk
    """

    title = "ESX Virtual Machine Disk Configuration"
    description = "ESX virtual machine disk properties"
    required = True
    size = NumberItem(title="Disk size", description="Size of the disk in GB", required=True)
    unit = StringItem(
        title="Disk size unit",
        description="Unit of the disk size, to VMware a " "GB is the same as GiB = 1024MiB",
        required=False,
        default="GB",
        enum=("KB", "MB", "GB"),
    )
    adapter = StringItem(
        title="Virtual Machine Adapter Name",
        # Fix: missing space between the implicitly-concatenated literals
        # ("...virtual machinefor creation...").
        description="Unique adapter name for virtual machine "
        "for creation an arbitrary value should be specified",
        required=True,
    )
    filename = StringItem(
        title="Virtual Machine Disk File",
        description="File name of the virtual machine vmdk",
    )
    datastore = StringItem(
        title="Virtual Machine Disk Datastore",
        description="Disk datastore where the virtual machine files will be placed",
        required=True,
    )
    address = StringItem(
        title="Virtual Machine SCSI Address",
        description="Address of the SCSI adapter for the virtual machine",
        # "<bus>:<unit>" form, e.g. "0:1"
        pattern=r"\d:\d",
    )
    thin_provision = BooleanItem(
        title="Virtual Machine Disk Provision Type",
        description="Provision type of the disk",
        default=True,
        required=False,
    )
    # Explicit null is accepted so callers can leave scrubbing unspecified.
    eagerly_scrub = AnyOfItem(required=False, items=[BooleanItem(), NullItem()])
    controller = StringItem(
        title="Virtual Machine SCSI Adapter",
        description="Name of the SCSI adapter where the disk will be connected",
        required=True,
    )
class ESXVirtualMachineNicMapSchemaItem(ComplexSchemaItem):
    """
    Configuration Schema Item for ESX Virtual Machine Nic Map
    """

    title = "ESX Virtual Machine Nic Configuration"
    description = "ESX Virtual Machine nic properties"
    required = False
    # Per-NIC guest network settings; every field is optional so a partial
    # mapping validates.
    domain = StringItem()  # DNS domain for the interface
    gateway = IPv4Item()  # default gateway address
    ip_addr = IPv4Item()  # static IPv4 address
    subnet_mask = IPv4Item()  # IPv4 subnet mask
class ESXVirtualMachineInterfaceSchemaItem(ComplexSchemaItem):
    """
    Configuration Schema Item for ESX Virtual Machine Network Interface
    """

    title = "ESX Virtual Machine Network Interface Configuration"
    description = "ESX Virtual Machine network adapter properties"
    required = True
    name = StringItem(
        title="Virtual Machine Port Group",
        description="Specifies the port group name for the virtual machine connection",
        required=True,
    )
    adapter = StringItem(
        title="Virtual Machine Network Adapter",
        description="Unique name of the network adapter, "
        "for creation an arbitrary value should be specified",
        required=True,
    )
    adapter_type = StringItem(
        title="Virtual Machine Adapter Type",
        description="Network adapter type of the virtual machine",
        required=True,
        enum=("vmxnet", "vmxnet2", "vmxnet3", "e1000", "e1000e"),
        default="vmxnet3",
    )
    switch_type = StringItem(
        title="Virtual Machine Switch Type",
        description="Specifies the type of the virtual switch for the virtual machine connection",
        required=True,
        default="standard",
        enum=("standard", "distributed"),
    )
    mac = StringItem(
        title="Virtual Machine MAC Address",
        description="Mac address of the virtual machine",
        required=False,
        # NOTE(review): this pattern only accepts lowercase hex digits, so an
        # uppercase MAC such as "AA:BB:..." is rejected — confirm intended.
        pattern="^([0-9a-f]{1,2}[:]){5}([0-9a-f]{1,2})$",
    )
    # Optional guest-customization settings for this NIC.
    mapping = ESXVirtualMachineNicMapSchemaItem()
    # Shared connect-at-power-on / connected flags.
    connectable = ESXVirtualMachineDeviceConnectionItem()
class ESXVirtualMachineMemorySchemaItem(ComplexSchemaItem):
    """
    Configuration Schema Item for ESX Virtual Machine Memory
    """

    title = "ESX Virtual Machine Memory Configuration"
    description = "ESX Virtual Machine memory property"
    required = True
    size = IntegerItem(title="Memory size", description="Size of the memory", required=True)
    unit = StringItem(
        title="Memory unit",
        description="Unit of the memory, to VMware a " "GB is the same as GiB = 1024MiB",
        required=False,
        default="MB",
        enum=("MB", "GB"),
    )
    hotadd = BooleanItem(required=False, default=False)  # allow memory hot-add
    reservation_max = BooleanItem(required=False, default=False)  # reserve all guest memory
class ESXVirtualMachineCpuSchemaItem(ComplexSchemaItem):
    """
    Configuration Schema Item for ESX Virtual Machine CPU
    """

    # Fix: title/description were copy-pasted from the Memory schema item and
    # wrongly advertised this CPU item as "Memory Configuration".
    title = "ESX Virtual Machine CPU Configuration"
    description = "ESX Virtual Machine CPU property"
    required = True
    count = IntegerItem(title="CPU core count", description="CPU core count", required=True)
    cores_per_socket = IntegerItem(
        title="CPU cores per socket",
        description="CPU cores per socket count",
        required=False,
    )
    nested = BooleanItem(
        title="Virtual Machine Nested Property",
        description="Nested virtualization support",
        default=False,
    )
    hotadd = BooleanItem(
        title="Virtual Machine CPU hot add", description="CPU hot add", default=False
    )
    hotremove = BooleanItem(
        title="Virtual Machine CPU hot remove",
        description="CPU hot remove",
        default=False,
    )
class ESXVirtualMachineConfigSchema(DefinitionsSchema):
    """
    Configuration Schema for ESX Virtual Machines
    """

    title = "ESX Virtual Machine Configuration Schema"
    description = "ESX Virtual Machine configuration schema"
    vm_name = StringItem(
        title="Virtual Machine name",
        description="Name of the virtual machine",
        required=True,
    )
    cpu = ESXVirtualMachineCpuSchemaItem()
    memory = ESXVirtualMachineMemorySchemaItem()
    image = StringItem(title="Virtual Machine guest OS", description="Guest OS type", required=True)
    version = StringItem(
        title="Virtual Machine hardware version",
        description="Container hardware version property",
        required=True,
    )
    interfaces = ArrayItem(
        items=ESXVirtualMachineInterfaceSchemaItem(),
        min_items=1,
        required=False,
        unique_items=True,
    )
    disks = ArrayItem(
        items=ESXVirtualMachineDiskSchemaItem(),
        min_items=1,
        required=False,
        unique_items=True,
    )
    scsi_devices = ArrayItem(
        items=ESXVirtualMachineScsiSchemaItem(),
        min_items=1,
        required=False,
        unique_items=True,
    )
    serial_ports = ArrayItem(
        items=ESXVirtualMachineSerialSchemaItem(),
        min_items=0,
        required=False,
        unique_items=True,
    )
    cd_dvd_drives = ArrayItem(
        items=ESXVirtualMachineCdDriveSchemaItem(),
        min_items=0,
        required=False,
        unique_items=True,
    )
    sata_controllers = ArrayItem(
        items=ESXVirtualMachineSataSchemaItem(),
        min_items=0,
        required=False,
        unique_items=True,
    )
    datacenter = StringItem(
        title="Virtual Machine Datacenter",
        description="Datacenter of the virtual machine",
        required=True,
    )
    datastore = StringItem(
        title="Virtual Machine Datastore",
        description="Datastore of the virtual machine",
        required=True,
    )
    placement = ESXVirtualMachinePlacementSchemaItem()
    template = BooleanItem(
        title="Virtual Machine Template",
        description="Template to create the virtual machine from",
        default=False,
    )
    tools = BooleanItem(
        title="Virtual Machine VMware Tools",
        description="Install VMware tools on the guest machine",
        default=False,
    )
    power_on = BooleanItem(
        title="Virtual Machine Power",
        # Fix: user-facing typo "afret" -> "after".
        description="Power on virtual machine after creation",
        default=False,
    )
    deploy = BooleanItem(
        title="Virtual Machine Deploy Salt",
        description="Deploy salt after successful installation",
        default=False,
    )
class ESXVirtualMachineRemoveSchema(DefinitionsSchema):
    """
    Remove Schema for ESX Virtual Machines to delete or unregister virtual machines
    """

    name = StringItem(
        title="Virtual Machine name",
        description="Name of the virtual machine",
        required=True,
    )
    datacenter = StringItem(
        title="Virtual Machine Datacenter",
        description="Datacenter of the virtual machine",
        required=True,
    )
    # Placement is optional; explicit null is accepted as "no placement given".
    placement = AnyOfItem(
        required=False, items=[ESXVirtualMachinePlacementSchemaItem(), NullItem()]
    )
    power_off = BooleanItem(
        title="Power off vm",
        description="Power off vm before delete operation",
        required=False,
    )
class ESXVirtualMachineDeleteSchema(ESXVirtualMachineRemoveSchema):
    """
    Deletion Schema for ESX Virtual Machines
    """
class ESXVirtualMachineUnregisterSchema(ESXVirtualMachineRemoveSchema):
    """
    Unregister Schema for ESX Virtual Machines
    """
import logging
log = logging.getLogger(__name__)
__virtual_name__ = "nsxt_ip_pools"
def __virtual__():
    """
    Only load if the nsxt_ip_pools module is available in __salt__
    """
    loaded = "nsxt_ip_pools.get" in __salt__
    # Salt's loader reads the second tuple element as the reason only when the
    # first element is False.
    return (
        __virtual_name__ if loaded else False,
        "'nsxt_ip_pools' binary not found on system",
    )
def _create_state_response(name, old_state, new_state, result, comment):
state_response = dict()
state_response["name"] = name
state_response["result"] = result
state_response["comment"] = comment
state_response["changes"] = dict()
if new_state or old_state:
state_response["changes"]["old"] = old_state
state_response["changes"]["new"] = new_state
return state_response
def _needs_update(existing_ip_pool, input_dict):
updatable_keys = ["subnets", "description", "tags", "ip_release_delay"]
is_updatable = False
# check if any updatable field has different value from the existing one
for key in updatable_keys:
existing_val = existing_ip_pool.get(key)
input_val = input_dict.get(key)
if not existing_val and input_val:
is_updatable = True
if existing_val and input_val and existing_val != input_val:
is_updatable = True
return is_updatable
def _fill_input_dict_with_existing_info(existing_ip_pool, input_dict):
for key in dict(existing_ip_pool).keys():
if key not in input_dict:
input_dict[key] = existing_ip_pool[key]
def present(
    name,
    display_name,
    hostname,
    username,
    password,
    cert=None,
    verify_ssl=True,
    cert_common_name=None,
    description=None,
    tags=None,
    subnets=None,
    ip_release_delay=None,
):
    """
    Creates/Updates(if present with the same name) an IP Address Pool

    .. code-block:: yaml

        create_ip_pool:
          nsxt_ip_pools.present:
            - name: Create IP Pool
              hostname: {{ pillar['nsxt_manager_hostname'] }}
              username: {{ pillar['nsxt_manager_username'] }}
              password: {{ pillar['nsxt_manager_password'] }}
              cert: {{ pillar['nsxt_manager_certificate'] }}
              verify_ssl: <False/True>
              display_name: <ip pool name>
              description: <ip pool description>
              tags:
                - tag: <tag-key-1>
                  scope: <tag-value-1>
                - tag: <tag-key-2>
                  scope: <tag-value-2>
              subnets:
                - cidr: <cidr_value>
                  gateway_ip: <gateway_ip_value>
                  dns_nameservers:
                    - <dns_nameserver1>
                    - <dns_nameserver2>
                  allocation_ranges:
                    - start: <IP-Address-Range-start-1>
                      end: <IP-Address-Range-end-1>
              ip_release_delay: <delay in milliseconds>

    name
        The Operation to perform

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file to connect to NSX-T manager.
        The certificate can be retrieved from browser.

    cert_common_name
        (Optional) By default, the hostname parameter and the common name in certificate is compared for host name
        verification. If the client certificate common name and hostname do not match (in case of self-signed
        certificates), specify the certificate common name as part of this parameter. This value is then used to
        compare against

    display_name
        The name using which IP Address Pool will be created. In case of update the provided display_name should be
        unique else the method will raise an error

    description
        (Optional) description for the IP Address Pool

    tags
        (Optional) Opaque identifiers meaningful to the API user. Maximum 30 tags can be associated:

        .. code-block:: yaml

            tags:
              - tag: <tag-key-1>
                scope: <tag-value-1>
              - tag: <tag-key-2>
                scope: <tag-value-2>

    subnets
        (Optional) The collection of one or more subnet objects in a pool.
        Subnets can be IPv4 or IPv6 and they should not overlap. The maximum number will not exceed 5 subnets.

        .. code-block:: yaml

            subnets:
              - cidr: <cidr_value>
                gateway_ip: <gateway_ip_value>
                dns_nameservers:
                  - <dns_nameserver1>
                  - <dns_nameserver2>
                allocation_ranges:
                  - start: <IP-Address-Range-start-1>
                    end: <IP-Address-Range-end-1>

    ip_release_delay
        (Optional) Delay in milliseconds, while releasing allocated IP address from IP pool (Default is 2 mins - configured on NSX device).
    """
    # User-settable fields only; missing values are backfilled from the
    # existing pool before an update so unspecified attributes are preserved.
    input_dict = {
        "display_name": display_name,
        "description": description,
        "tags": tags,
        "subnets": subnets,
        "ip_release_delay": ip_release_delay,
    }
    log.info("Checking if IP Pool with name %s is present", display_name)
    get_ip_pools_response = __salt__["nsxt_ip_pools.get_by_display_name"](
        hostname,
        username,
        password,
        display_name,
        cert=cert,
        verify_ssl=verify_ssl,
        cert_common_name=cert_common_name,
    )
    if "error" in get_ip_pools_response:
        return _create_state_response(name, None, None, False, get_ip_pools_response["error"])
    ip_pools = get_ip_pools_response["results"]
    if len(ip_pools) > 1:
        # display_name is not unique on NSX-T; refuse to guess which pool the
        # user meant.
        log.info("Multiple instances found for the provided display name %s", display_name)
        return _create_state_response(
            name,
            None,
            None,
            False,
            "Multiple IP Pools found for the provided display name {}".format(display_name),
        )
    existing_ip_pool = ip_pools[0] if len(ip_pools) > 0 else None
    if __opts__.get("test"):
        # test=True run: describe the action without performing it; Salt
        # expects result=None in that case.
        log.info("present is called with test option")
        if existing_ip_pool:
            return _create_state_response(
                name,
                None,
                None,
                None,
                "State present will update IP Pool with name {}".format(display_name),
            )
        else:
            return _create_state_response(
                name,
                None,
                None,
                None,
                "State present will create IP Pool with name {}".format(display_name),
            )
    if existing_ip_pool:
        is_update_required = _needs_update(existing_ip_pool, input_dict)
        if is_update_required:
            # Carry over every field the caller did not supply so the update
            # (a full replace on the NSX-T side) does not wipe them.
            _fill_input_dict_with_existing_info(existing_ip_pool, input_dict)
            log.info("IP Pool found with name %s", display_name)
            updated_ip_pool = __salt__["nsxt_ip_pools.update"](
                ip_pool_id=existing_ip_pool["id"],
                revision=existing_ip_pool["_revision"],
                hostname=hostname,
                username=username,
                password=password,
                cert=cert,
                verify_ssl=verify_ssl,
                cert_common_name=cert_common_name,
                display_name=input_dict.get("display_name"),
                description=input_dict.get("description"),
                tags=input_dict.get("tags"),
                subnets=input_dict.get("subnets"),
                ip_release_delay=input_dict.get("ip_release_delay"),
            )
            if "error" in updated_ip_pool:
                return _create_state_response(name, None, None, False, updated_ip_pool["error"])
            return _create_state_response(
                name,
                existing_ip_pool,
                updated_ip_pool,
                True,
                "Updated IP Pool {}".format(display_name),
            )
        else:
            log.info("All fields are same as existing IP Address Pool %s", display_name)
            return _create_state_response(
                name, None, None, True, "IP Address Pool exists already, no action to perform"
            )
    else:
        log.info("No IP Pool found with name %s", display_name)
        created_ip_pool = __salt__["nsxt_ip_pools.create"](
            hostname,
            username,
            password,
            display_name=input_dict.get("display_name"),
            cert=cert,
            verify_ssl=verify_ssl,
            cert_common_name=cert_common_name,
            description=input_dict.get("description"),
            tags=input_dict.get("tags"),
            subnets=input_dict.get("subnets"),
            ip_release_delay=input_dict.get("ip_release_delay"),
        )
        if "error" in created_ip_pool:
            return _create_state_response(name, None, None, False, created_ip_pool["error"])
        return _create_state_response(
            name, None, created_ip_pool, True, "Created IP Pool {}".format(display_name)
        )
def absent(
    name,
    display_name,
    hostname,
    username,
    password,
    cert=None,
    verify_ssl=True,
    cert_common_name=None,
):
    """
    Deletes an IP Address Pool of provided name (if present)

    .. code-block:: yaml

        delete_ip_pool:
          nsxt_ip_pools.absent:
            - name: Delete IP Pool
              hostname: {{ pillar['nsxt_manager_hostname'] }}
              username: {{ pillar['nsxt_manager_username'] }}
              password: {{ pillar['nsxt_manager_password'] }}
              cert: {{ pillar['nsxt_manager_certificate'] }}
              verify_ssl: <False/True>
              display_name: <ip pool name>

    name
        The Operation to perform

    hostname
        The host name of NSX-T manager

    username
        Username to connect to NSX-T manager

    password
        Password to connect to NSX-T manager

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file to connect to NSX-T manager.
        The certificate can be retrieved from browser.

    cert_common_name
        (Optional) By default, the hostname parameter and the common name in certificate is compared for host name
        verification. If the client certificate common name and hostname do not match (in case of self-signed
        certificates), specify the certificate common name as part of this parameter. This value is then used to
        compare against

    display_name
        Display name of IP Address Pool to delete
    """
    log.info("Checking if IP Address Pool with name %s is present", display_name)
    get_ip_pools_response = __salt__["nsxt_ip_pools.get_by_display_name"](
        hostname,
        username,
        password,
        display_name,
        cert=cert,
        verify_ssl=verify_ssl,
        cert_common_name=cert_common_name,
    )
    if "error" in get_ip_pools_response:
        return _create_state_response(name, None, None, False, get_ip_pools_response["error"])
    ip_pools = get_ip_pools_response["results"]
    if len(ip_pools) > 1:
        # display_name is not unique on NSX-T; refuse to guess which pool to
        # delete.
        log.info("Multiple instances found for the provided display name %s", display_name)
        return _create_state_response(
            name,
            None,
            None,
            False,
            "Multiple IP Pools found for the provided display name {}".format(display_name),
        )
    existing_ip_pool = ip_pools[0] if len(ip_pools) > 0 else None
    if __opts__.get("test"):
        # test=True run: describe the action without performing it; Salt
        # expects result=None in that case.
        log.info("absent is called with test option")
        if existing_ip_pool:
            return _create_state_response(
                name,
                None,
                None,
                None,
                "State absent will delete IP Pool with name {}".format(display_name),
            )
        else:
            return _create_state_response(
                name,
                None,
                None,
                None,
                "State absent will do nothing as no IP Pool found with name {}".format(
                    display_name
                ),
            )
    if existing_ip_pool:
        log.info("IP Address Pool found with name %s", display_name)
        deleted_response = __salt__["nsxt_ip_pools.delete"](
            existing_ip_pool["id"],
            hostname,
            username,
            password,
            cert=cert,
            verify_ssl=verify_ssl,
            cert_common_name=cert_common_name,
        )
        if "error" in deleted_response:
            return _create_state_response(name, None, None, False, deleted_response["error"])
        return _create_state_response(
            name, existing_ip_pool, None, True, "Deleted IP Pool {}".format(display_name)
        )
    else:
        log.info("No IP Address Pool found with name %s", display_name)
        return _create_state_response(
            name, None, None, True, "No IP Address Pool found with name {}".format(display_name)
        )
import logging
from saltext.vmware.utils import vmc_constants
from saltext.vmware.utils import vmc_state
log = logging.getLogger(__name__)
SECURITY_RULE_NOT_FOUND_ERROR = "could not be found"
try:
from saltext.vmware.modules import vmc_security_rules
HAS_SECURITY_RULES = True
except ImportError:
HAS_SECURITY_RULES = False
def __virtual__():
    """Expose this state module only when the vmc_security_rules module imported cleanly."""
    if HAS_SECURITY_RULES:
        return "vmc_security_rules"
    return False, "'vmc_security_rules' binary not found on system"
def present(
    name,
    hostname,
    refresh_key,
    authorization_host,
    org_id,
    sddc_id,
    domain_id,
    rule_id,
    verify_ssl=True,
    cert=None,
    source_groups=None,
    destination_groups=None,
    services=None,
    scope=None,
    action=None,
    tag=None,
    logged=None,
    disabled=None,
    notes=None,
    sequence_number=None,
    tags=vmc_constants.VMC_NONE,
    display_name=None,
):
    """
    Ensure a given security rule exists for given SDDC

    hostname
        The host name of NSX-T manager

    refresh_key
        API Token of the user which is used to get the Access Token required for VMC operations

    authorization_host
        Hostname of the VMC cloud console

    org_id
        The Id of organization to which the SDDC belongs to

    sddc_id
        The Id of SDDC for which the security rules should be added

    domain_id
        The domain_id for which the security rule should belong to. Possible values: mgw, cgw

    rule_id
        Id of the security_rule to be added to SDDC

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file to connect to VMC Cloud Console.
        The certificate can be retrieved from browser.

    source_groups
        (Optional) List of Source group paths.
        We need paths as duplicate names may exist for groups under different domains.
        Along with paths we support IP Address of type IPv4 and IPv6.
        IP Address can be in one of the format(CIDR, IP Address, Range of IP Address).
        In order to specify all groups, use the constant "ANY". This is case insensitive.
        If "ANY" is used, it should be the ONLY element in the group array.
        Error will be thrown if ANY is used in conjunction with other values.
        If this value is not passed, then ["ANY"] will be used by default.

    destination_groups
        (Optional) List of Destination group paths.
        We need paths as duplicate names may exist for groups under different domains.
        Along with paths we support IP Address of type IPv4 and IPv6.
        IP Address can be in one of the format(CIDR, IP Address, Range of IP Address).
        In order to specify all groups, use the constant "ANY". This is case insensitive.
        If "ANY" is used, it should be the ONLY element in the group array.
        Error will be thrown if ANY is used in conjunction with other values.
        If this value is not passed, then ["ANY"] will be used by default.

    Note: Both source_groups and destination_groups can not be ["ANY"] when domain_id=mgw

    services
        (Optional) Names of services. In order to specify all services, use the constant "ANY".
        This is case insensitive. If "ANY" is used, it should be the ONLY element in the services array.
        Error will be thrown if ANY is used in conjunction with other values.
        If this value is not passed, then ["ANY"] will be used by default.

    scope
        (Optional) The list of policy paths where the rule is applied LR/Edge/T0/T1/LRP etc.
        Note that a given rule can be applied on multiple LRs/LRPs.

    action
        (Optional) The action to be applied to all the services.
        Possible Values for domain_id=cgw are: ALLOW, DROP, REJECT
        Possible Values for domain_id=mgw are: ALLOW

    tag
        (Optional) Tag applied on the rule. User level field which will be printed in CLI and packet logs.

    logged
        (Optional) Enable logging flag. Flag to enable packet logging. Default is disabled.

    disabled
        (Optional) Flag to disable the rule. Default is enabled.

    notes
        (Optional) Text for additional notes on changes.

    sequence_number
        (Optional) Sequence number of the Rule.
        This field is used to resolve conflicts between multiple Rules under Security or Gateway Policy for a Domain.
        If no sequence number is specified by the user, a value of 0 is assigned by default.
        If there are multiple rules with the same sequence number then their order is not deterministic.
        If a specific order of rules is desired, then one has to specify unique sequence numbers.

    tags
        (Optional) Opaque identifiers meaningful to the user.

        .. code::

            tags='[
                {
                    "tag": "<tag-key-1>"
                    "scope": "<tag-value-1>"
                },
                {
                    "tag": "<tag-key-2>"
                    "scope": "<tag-value-2>"
                }
            ]'

    display_name
        Identifier to use when displaying entity in logs or GUI. This is applicable for only update scenario.
        For create scenario, display_name would be same as rule_id.

    Example values:

        .. code::

            {
                "display_name": "vCenter Inbound Rule"
                "sequence_number": 0,
                "source_groups": [
                    "ANY"
                ],
                "services": ["/infra/services/HTTPS"],
                "logged": false,
                "disabled": false,
                "destination_groups": [
                    "/infra/domains/mgw/groups/VCENTER"
                ],
                "scope": [
                    "/infra/tier-1s/mgw"
                ],
                "action": "ALLOW",
                "tag": "",
                "notes": "",
                "tags": null
            }
    """
    input_dict = {
        "source_groups": source_groups,
        "destination_groups": destination_groups,
        "services": services,
        "scope": scope,
        "action": action,
        "tag": tag,
        "logged": logged,
        "disabled": disabled,
        "notes": notes,
        "sequence_number": sequence_number,
        "tags": tags,
        "display_name": display_name,
    }
    # VMC_NONE is a sentinel distinguishing "argument not passed" from an
    # explicit None; drop sentinel entries so they do not take part in the
    # update-needed comparison below.
    input_dict = {k: v for k, v in input_dict.items() if v != vmc_constants.VMC_NONE}
    get_security_rule = __salt__["vmc_security_rules.get_by_id"](
        hostname=hostname,
        refresh_key=refresh_key,
        authorization_host=authorization_host,
        org_id=org_id,
        sddc_id=sddc_id,
        domain_id=domain_id,
        rule_id=rule_id,
        verify_ssl=verify_ssl,
        cert=cert,
    )
    existing_security_rule = None
    if "error" not in get_security_rule:
        log.info("Security rule found with Id %s", rule_id)
        existing_security_rule = get_security_rule
    elif SECURITY_RULE_NOT_FOUND_ERROR not in get_security_rule["error"]:
        # "Not found" simply means we will create the rule; any other error
        # aborts the state run.
        return vmc_state._create_state_response(
            name=name, comment=get_security_rule["error"], result=False
        )
    if __opts__.get("test"):
        # test=True run: describe the action without performing it.
        log.info("present is called with test option")
        if existing_security_rule:
            return vmc_state._create_state_response(
                name=name, comment="State present will update Security rule {}".format(rule_id)
            )
        else:
            return vmc_state._create_state_response(
                name=name, comment="State present will create Security rule {}".format(rule_id)
            )
    if existing_security_rule:
        updatable_keys = input_dict.keys()
        is_update_required = vmc_state._check_for_updates(
            existing_security_rule, input_dict, updatable_keys, ["tags"]
        )
        if is_update_required:
            updated_security_rule = __salt__["vmc_security_rules.update"](
                hostname=hostname,
                refresh_key=refresh_key,
                authorization_host=authorization_host,
                org_id=org_id,
                sddc_id=sddc_id,
                domain_id=domain_id,
                rule_id=rule_id,
                verify_ssl=verify_ssl,
                cert=cert,
                source_groups=source_groups,
                destination_groups=destination_groups,
                services=services,
                scope=scope,
                action=action,
                tag=tag,
                logged=logged,
                disabled=disabled,
                notes=notes,
                sequence_number=sequence_number,
                tags=tags,
                display_name=display_name,
            )
            if "error" in updated_security_rule:
                return vmc_state._create_state_response(
                    name=name, comment=updated_security_rule["error"], result=False
                )
            # Re-read the rule so the reported "new" state includes any
            # server-populated fields.
            get_security_rule_after_update = __salt__["vmc_security_rules.get_by_id"](
                hostname=hostname,
                refresh_key=refresh_key,
                authorization_host=authorization_host,
                org_id=org_id,
                sddc_id=sddc_id,
                domain_id=domain_id,
                rule_id=rule_id,
                verify_ssl=verify_ssl,
                cert=cert,
            )
            if "error" in get_security_rule_after_update:
                return vmc_state._create_state_response(
                    name=name, comment=get_security_rule_after_update["error"], result=False
                )
            return vmc_state._create_state_response(
                name=name,
                comment="Updated Security rule {}".format(rule_id),
                old_state=existing_security_rule,
                new_state=get_security_rule_after_update,
                result=True,
            )
        else:
            log.info("All fields are same as existing Security rule %s", rule_id)
            return vmc_state._create_state_response(
                name=name, comment="Security rule exists already, no action to perform", result=True
            )
    else:
        log.info("No Security rule found with Id %s", rule_id)
        created_security_rule = __salt__["vmc_security_rules.create"](
            hostname=hostname,
            refresh_key=refresh_key,
            authorization_host=authorization_host,
            org_id=org_id,
            sddc_id=sddc_id,
            domain_id=domain_id,
            rule_id=rule_id,
            verify_ssl=verify_ssl,
            cert=cert,
            source_groups=source_groups,
            destination_groups=destination_groups,
            services=services,
            scope=scope,
            action=action,
            tag=tag,
            logged=logged,
            disabled=disabled,
            notes=notes,
            sequence_number=sequence_number,
            tags=tags,
        )
        if "error" in created_security_rule:
            return vmc_state._create_state_response(
                name=name, comment=created_security_rule["error"], result=False
            )
        return vmc_state._create_state_response(
            name=name,
            comment="Created Security rule {}".format(rule_id),
            new_state=created_security_rule,
            result=True,
        )
def absent(
    name,
    hostname,
    refresh_key,
    authorization_host,
    org_id,
    sddc_id,
    domain_id,
    rule_id,
    verify_ssl=True,
    cert=None,
):
    """
    Ensure a given security rule does not exist on given SDDC

    hostname
        The host name of NSX-T manager

    refresh_key
        API Token of the user which is used to get the Access Token required for VMC operations

    authorization_host
        Hostname of the VMC cloud console

    org_id
        The Id of organization to which the SDDC belongs to

    sddc_id
        The Id of SDDC from which the security rule should be deleted

    domain_id
        The domain_id for which the security rule should belong to. Possible values: mgw, cgw

    rule_id
        Id of the security_rule to be deleted from SDDC

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file to connect to VMC Cloud Console.
        The certificate can be retrieved from browser.
    """
    log.info("Checking if Security rule with Id %s is present", rule_id)
    get_security_rule = __salt__["vmc_security_rules.get_by_id"](
        hostname=hostname,
        refresh_key=refresh_key,
        authorization_host=authorization_host,
        org_id=org_id,
        sddc_id=sddc_id,
        domain_id=domain_id,
        rule_id=rule_id,
        verify_ssl=verify_ssl,
        cert=cert,
    )
    existing_security_rule = None
    if "error" not in get_security_rule:
        log.info("Security rule found with Id %s", rule_id)
        existing_security_rule = get_security_rule
    elif SECURITY_RULE_NOT_FOUND_ERROR not in get_security_rule["error"]:
        # "Not found" is the desired outcome for absent; any other error
        # aborts the state run.
        return vmc_state._create_state_response(
            name=name, comment=get_security_rule["error"], result=False
        )
    if __opts__.get("test"):
        # test=True run: describe the action without performing it.
        log.info("absent is called with test option")
        if existing_security_rule:
            return vmc_state._create_state_response(
                name=name,
                comment="State absent will delete Security rule with Id {}".format(rule_id),
            )
        else:
            return vmc_state._create_state_response(
                name=name,
                comment="State absent will do nothing as no Security rule found with Id {}".format(
                    rule_id
                ),
            )
    if existing_security_rule:
        log.info("Security rule found with Id %s", rule_id)
        deleted_security_rule = __salt__["vmc_security_rules.delete"](
            hostname=hostname,
            refresh_key=refresh_key,
            authorization_host=authorization_host,
            org_id=org_id,
            sddc_id=sddc_id,
            domain_id=domain_id,
            rule_id=rule_id,
            verify_ssl=verify_ssl,
            cert=cert,
        )
        if "error" in deleted_security_rule:
            return vmc_state._create_state_response(
                name=name, comment=deleted_security_rule["error"], result=False
            )
        return vmc_state._create_state_response(
            name=name,
            comment="Deleted Security rule {}".format(rule_id),
            old_state=existing_security_rule,
            result=True,
        )
    else:
        log.info("No Security rule found with Id %s", rule_id)
        return vmc_state._create_state_response(
            name=name, comment="No Security rule found with Id {}".format(rule_id), result=True
        )
import logging
from saltext.vmware.utils import vmc_constants
from saltext.vmware.utils import vmc_state
log = logging.getLogger(__name__)
def present(
    name,
    hostname,
    refresh_key,
    authorization_host,
    org_id,
    sddc_id,
    tier1,
    nat,
    nat_rule,
    verify_ssl=True,
    cert=None,
    action=None,
    destination_network=None,
    source_network=None,
    translated_network=None,
    translated_ports=vmc_constants.VMC_NONE,
    scope=None,
    service=None,
    enabled=None,
    firewall_match=None,
    logging=None,
    description=None,
    tags=vmc_constants.VMC_NONE,
    sequence_number=None,
    display_name=None,
):
    """
    Ensure a given nat rule exists for given SDDC

    name
        Name of the salt state.

    hostname
        The host name of NSX-T manager

    refresh_key
        API Token of the user which is used to get the Access Token required for VMC operations

    authorization_host
        Hostname of the VMC cloud console

    org_id
        The Id of organization to which the SDDC belongs to

    sddc_id
        The Id of SDDC for which the nat rules should be added

    tier1
        Identifier of the tier-1 gateway the nat rule belongs to; passed
        through to the vmc_nat_rules execution module.

    nat
        Identifier of the NAT section under the tier-1 gateway; passed
        through to the vmc_nat_rules execution module.

    nat_rule
        Id of the nat rule to be added to SDDC

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file to connect to VMC Cloud Console.
        The certificate can be retrieved from browser.

    action
        specify type of nat rule it can have value REFLEXIVE, DNAT

        REFLEXIVE nat rule require
            source_network
            translated_network
            service should be empty
            translated_ports should be None

        DNAT Rule require
            service
            destination_network
            translated_network
            translated_ports
            source_network can be None or input network.

    destination_network
        Represents the destination network
        This supports single IP address or comma separated list of single IP
        addresses or CIDR. This does not support IP range or IP sets.

    scope
        Array of policy paths of labels, ProviderInterface, NetworkInterface

    service
        Represents the service on which the NAT rule will be applied

    source_network
        Represents the source network address
        This supports single IP address or comma separated list of single IP
        addresses or CIDR. This does not support IP range or IP sets.

    translated_network
        Represents the translated network address
        This supports single IP address or comma separated list of single IP
        addresses or CIDR. This does not support IP range or IP sets.

    translated_ports
        Port number or port range
        Please note, if there is service configured in this NAT rule, the translated_port
        will be realized on NSX Manager as the destination_port. If there is no service configured,
        the port will be ignored.

    enabled
        (Optional) Policy nat rule enabled flag
        The flag, which suggests whether the NAT rule is enabled or
        disabled. The default is True.

    firewall_match
        (Optional) Represents the firewall match flag
        It indicates how the firewall matches the address after NATing if firewall
        stage is not skipped.
        possible values: MATCH_EXTERNAL_ADDRESS, MATCH_INTERNAL_ADDRESS
        Default: "MATCH_INTERNAL_ADDRESS"

    logging
        (Optional) Policy nat rule logging flag
        default: False

    description
        (Optional) Description of nat rule

    tags
        (Optional) Opaque identifiers meaningful to the API user. Maximum 30 tags can be associated:

        .. code-block::

            tags:
              - tag: <tag-key-1>
                scope: <tag-value-1>
              - tag: <tag-key-2>
                scope: <tag-value-2>

    sequence_number
        (Optional) Sequence number of the nat rule
        The sequence_number decides the rule_priority of a NAT rule.
        default: 0
        type: int

    display_name
        Identifier to use when displaying entity in logs or GUI. This is applicable for only update scenario.
        For create scenario, display_name would be same as rule_id.

    Example values:

    .. code-block::

        action: REFLEXIVE
        translated_network: 10.182.171.36
        translated_ports: null
        destination_network: ''
        source_network: 192.168.1.23
        sequence_number: 0
        service: ''
        logging: false
        enabled: false
        scope:
          - /infra/labels/cgw-public
        tags:
          - tag: tag1
            scope: scope1
        description: ''
        firewall_match: MATCH_INTERNAL_ADDRESS
    """
    input_dict = {
        "action": action,
        "description": description,
        "destination_network": destination_network,
        "scope": scope,
        "service": service,
        "source_network": source_network,
        "tags": tags,
        "translated_network": translated_network,
        "translated_ports": translated_ports,
        "enabled": enabled,
        "firewall_match": firewall_match,
        "logging": logging,
        "sequence_number": sequence_number,
        "display_name": display_name,
    }
    # VMC_NONE is the "argument not supplied" sentinel; drop such entries so
    # they do not take part in the update comparison below.
    input_dict = {k: v for k, v in input_dict.items() if v != vmc_constants.VMC_NONE}
    # Look up the existing rule (if any).
    get_nat_rule_response = __salt__["vmc_nat_rules.get_by_id"](
        hostname=hostname,
        refresh_key=refresh_key,
        authorization_host=authorization_host,
        org_id=org_id,
        sddc_id=sddc_id,
        tier1=tier1,
        nat=nat,
        nat_rule=nat_rule,
        verify_ssl=verify_ssl,
        cert=cert,
    )
    if "error" in get_nat_rule_response:
        # A "could not be found" error simply means the rule does not exist
        # yet; any other error is a real failure.
        if "could not be found" in get_nat_rule_response["error"]:
            get_nat_rule_response = None
        else:
            return vmc_state._create_state_response(
                name=name, comment=get_nat_rule_response["error"], result=False
            )
    # test=True: report the action that would be taken without performing it.
    if __opts__.get("test"):
        log.info("present is called with test option")
        return vmc_state._create_state_response(
            name=name,
            comment="State present will {} nat rule {}".format(
                "update" if get_nat_rule_response else "create", nat_rule
            ),
        )
    if get_nat_rule_response:
        updatable_keys = input_dict.keys()
        is_update_required = vmc_state._check_for_updates(
            get_nat_rule_response, input_dict, updatable_keys, ["translated_ports", "tags"]
        )
        if is_update_required:
            updated_nat_rule = __salt__["vmc_nat_rules.update"](
                hostname=hostname,
                refresh_key=refresh_key,
                authorization_host=authorization_host,
                org_id=org_id,
                sddc_id=sddc_id,
                tier1=tier1,
                nat=nat,
                nat_rule=nat_rule,
                verify_ssl=verify_ssl,
                cert=cert,
                action=action,
                destination_network=destination_network,
                source_network=source_network,
                translated_network=translated_network,
                translated_ports=translated_ports,
                scope=scope,
                service=service,
                enabled=enabled,
                firewall_match=firewall_match,
                logging=logging,
                description=description,
                tags=tags,
                sequence_number=sequence_number,
                display_name=display_name,
            )
            if "error" in updated_nat_rule:
                return vmc_state._create_state_response(
                    name=name, comment=updated_nat_rule["error"], result=False
                )
            # Re-read the rule so new_state reflects what the server stored.
            updated_nat_rule = __salt__["vmc_nat_rules.get_by_id"](
                hostname=hostname,
                refresh_key=refresh_key,
                authorization_host=authorization_host,
                org_id=org_id,
                sddc_id=sddc_id,
                tier1=tier1,
                nat=nat,
                nat_rule=nat_rule,
                verify_ssl=verify_ssl,
                cert=cert,
            )
            if "error" in updated_nat_rule:
                return vmc_state._create_state_response(
                    name=name, comment=updated_nat_rule["error"], result=False
                )
            return vmc_state._create_state_response(
                name=name,
                comment="Updated nat rule {}".format(nat_rule),
                old_state=get_nat_rule_response,
                new_state=updated_nat_rule,
                result=True,
            )
        else:
            log.info("All fields are same as existing nat rule %s", nat_rule)
            return vmc_state._create_state_response(
                name=name, comment="Nat rule exists already, no action to perform", result=True
            )
    else:
        log.info("No nat rule found with Id %s", nat_rule)
        # Note: display_name is intentionally not passed on create (it
        # defaults to the rule id server-side; see docstring).
        created_nat_rule = __salt__["vmc_nat_rules.create"](
            hostname=hostname,
            refresh_key=refresh_key,
            authorization_host=authorization_host,
            org_id=org_id,
            sddc_id=sddc_id,
            tier1=tier1,
            nat=nat,
            nat_rule=nat_rule,
            verify_ssl=verify_ssl,
            cert=cert,
            action=action,
            destination_network=destination_network,
            source_network=source_network,
            translated_network=translated_network,
            translated_ports=translated_ports,
            scope=scope,
            service=service,
            enabled=enabled,
            firewall_match=firewall_match,
            logging=logging,
            description=description,
            tags=tags,
            sequence_number=sequence_number,
        )
        if "error" in created_nat_rule:
            return vmc_state._create_state_response(
                name=name, comment=created_nat_rule["error"], result=False
            )
        return vmc_state._create_state_response(
            name=name,
            comment="Created nat rule {}".format(nat_rule),
            new_state=created_nat_rule,
            result=True,
        )
def absent(
    name,
    hostname,
    refresh_key,
    authorization_host,
    org_id,
    sddc_id,
    tier1,
    nat,
    nat_rule,
    verify_ssl=True,
    cert=None,
):
    """
    Ensure a given nat rule does not exist on given SDDC

    name
        Name of the salt state.

    hostname
        The host name of NSX-T manager

    refresh_key
        API Token of the user which is used to get the Access Token required for VMC operations

    authorization_host
        Hostname of the VMC cloud console

    org_id
        The Id of organization to which the SDDC belongs to

    sddc_id
        The Id of SDDC from which the nat rule should be deleted

    tier1
        Identifier of the tier-1 gateway the nat rule belongs to; passed
        through to the vmc_nat_rules execution module.

    nat
        Identifier of the NAT section under the tier-1 gateway; passed
        through to the vmc_nat_rules execution module.

    nat_rule
        Id of the nat rule to be deleted from SDDC

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL client certificate file to connect to VMC Cloud Console.
        The certificate can be retrieved from browser.
    """
    log.info("Checking if nat rule with Id %s is present", nat_rule)
    get_nat_rule_response = __salt__["vmc_nat_rules.get_by_id"](
        hostname=hostname,
        refresh_key=refresh_key,
        authorization_host=authorization_host,
        org_id=org_id,
        sddc_id=sddc_id,
        tier1=tier1,
        nat=nat,
        nat_rule=nat_rule,
        verify_ssl=verify_ssl,
        cert=cert,
    )
    if "error" in get_nat_rule_response:
        # "could not be found" means the rule is already absent; any other
        # error is a real failure and is surfaced to the user.
        if "could not be found" in get_nat_rule_response["error"]:
            get_nat_rule_response = None
        else:
            return vmc_state._create_state_response(
                name=name, comment=get_nat_rule_response["error"], result=False
            )
    # test=True: report the action that would be taken without performing it.
    if __opts__.get("test"):
        log.info("absent is called with test option")
        if get_nat_rule_response:
            return vmc_state._create_state_response(
                name=name, comment="State absent will delete nat rule with Id {}".format(nat_rule)
            )
        else:
            return vmc_state._create_state_response(
                name=name,
                comment="State absent will do nothing as no nat rule found with Id {}".format(
                    nat_rule
                ),
            )
    if get_nat_rule_response:
        # BUG FIX: this message previously read "Security found with Id %s",
        # a copy/paste leftover from the security-rules state module.
        log.info("Nat rule found with Id %s", nat_rule)
        deleted_nat_rule = __salt__["vmc_nat_rules.delete"](
            hostname=hostname,
            refresh_key=refresh_key,
            authorization_host=authorization_host,
            org_id=org_id,
            sddc_id=sddc_id,
            tier1=tier1,
            nat=nat,
            nat_rule=nat_rule,
            verify_ssl=verify_ssl,
            cert=cert,
        )
        if "error" in deleted_nat_rule:
            return vmc_state._create_state_response(
                name=name, comment=deleted_nat_rule["error"], result=False
            )
        return vmc_state._create_state_response(
            name=name,
            comment="Deleted nat rule {}".format(nat_rule),
            old_state=get_nat_rule_response,
            result=True,
        )
    else:
        log.info("No nat rule found with Id %s", nat_rule)
        return vmc_state._create_state_response(
            name=name, comment="No nat rule found with Id {}".format(nat_rule), result=True
        )
import hashlib
import logging
import socket
import ssl
import salt.exceptions
import saltext.vmware.utils.cluster as utils_cluster
import saltext.vmware.utils.common as utils_common
import saltext.vmware.utils.datacenter as utils_datacenter
# pylint: disable=no-name-in-module
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
log = logging.getLogger(__name__)
def get_hosts(
    service_instance,
    datacenter_name=None,
    host_names=None,
    cluster_name=None,
    get_all_hosts=False,
):
    """
    Return vim.HostSystem objects for ESXi hosts in a vCenter, filtered by
    name and/or by datacenter and cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.
    datacenter_name
        The datacenter name. Default is None.
    host_names
        The host_names to be retrieved. Default is None.
    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.
    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    """
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            "Must specify the datacenter when specifying the cluster"
        )
    wanted_names = host_names or []
    property_list = ["name"]
    if cluster_name:
        # Cluster membership is decided via each host's parent object, so
        # fetch that property as well.
        property_list.append("parent")
    if datacenter_name:
        container = utils_datacenter.get_datacenter(service_instance, datacenter_name)
    else:
        # No datacenter given: search from the root folder.
        container = utils_common.get_root_folder(service_instance)
    found = utils_common.get_mors_with_properties(
        service_instance,
        vim.HostSystem,
        container_ref=container,
        property_list=property_list,
    )
    log.trace("Retrieved hosts: %s", [entry["name"] for entry in found])
    selected = []
    for entry in found:
        if cluster_name:
            parent = entry["parent"]
            # Hosts outside a cluster, or in a different cluster, are skipped.
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if utils_common.get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or entry["name"] in wanted_names:
            selected.append(entry["object"])
    return selected
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB, host_cache_manager=None):
    """
    Configures the host cache of the specified host.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.
    swap_size_MiB
        The size in Mibibytes of the swap.
    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method.

    Raises VMwareObjectRetrievalError if the host exposes no cache
    configuration manager, VMwareApiError on permission/API faults and
    VMwareRuntimeError on runtime faults. Returns True on success.
    """
    hostname = utils_common.get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = utils_common.get_properties_of_managed_object(
            host_ref, ["configManager.cacheConfigurationManager"]
        )
        if not props.get("configManager.cacheConfigurationManager"):
            raise salt.exceptions.VMwareObjectRetrievalError(
                "Host '{}' has no host cache".format(hostname)
            )
        host_cache_manager = props["configManager.cacheConfigurationManager"]
    log.trace(
        "Configuring the host cache on host '%s', datastore '%s', " "swap size=%s MiB",
        hostname,
        datastore_ref.name,
        swap_size_MiB,
    )
    spec = vim.HostCacheConfigurationSpec(datastore=datastore_ref, swapSize=swap_size_MiB)
    log.trace("host_cache_spec=%s", spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        # Translate pyVmomi faults into salt exceptions for callers.
        raise salt.exceptions.VMwareApiError(
            "Not enough permissions. Required privilege: " "{}".format(exc.privilegeId)
        )
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    utils_common.wait_for_task(task, hostname, "HostCacheConfigurationTask")
    log.trace("Configured host cache on host '%s'", hostname)
    return True
def list_hosts(service_instance):
    """
    Returns a list of hosts (vim.HostSystem) associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    """
    return utils_common.list_objects(service_instance, vim.HostSystem)
def disconnect_host(host, service_instance):
    """
    Disconnects host from vCenter instance.

    Returns connection state of host.

    host
        Name of ESXi instance in vCenter.
    service_instance
        The Service Instance Object from which to obtain host.
    """
    host = utils_common.get_mor_by_property(service_instance, vim.HostSystem, host)
    # Already disconnected: nothing to do.
    if host.summary.runtime.connectionState == "disconnected":
        return host.summary.runtime.connectionState
    task = host.DisconnectHost_Task()
    # NOTE(review): assumes wait_for_task returns the host/task result that
    # carries summary.runtime (reconnect_host relies on the same behavior)
    # -- confirm against utils_common.wait_for_task.
    host = utils_common.wait_for_task(task, host, "disconnect host task")
    return host.summary.runtime.connectionState
def reconnect_host(host, service_instance):
    """
    Reconnects host from vCenter instance.

    Returns connection state of host.

    host
        Name of ESXi instance in vCenter.
    service_instance
        The Service Instance Object from which to obtain host.
    """
    host_ref = utils_common.get_mor_by_property(service_instance, vim.HostSystem, host)
    current_state = host_ref.summary.runtime.connectionState
    # Already connected: nothing to do.
    if current_state == "connected":
        return current_state
    reconnected = utils_common.wait_for_task(
        host_ref.ReconnectHost_Task(), host_ref, "reconnect host task"
    )
    return reconnected.summary.runtime.connectionState
def move_host(host, cluster_name, service_instance):
    """
    Move host to a different cluster within the same datacenter.

    Returns a confirmation message describing the move.

    host
        Name of ESXi instance in vCenter.
    cluster_name
        Name of cluster to move host to.
    service_instance
        The Service Instance Object from which to obtain host.
    """
    host_ref = utils_common.get_mor_by_property(service_instance, vim.HostSystem, host)
    cluster_ref = utils_common.get_mor_by_property(
        service_instance, vim.ClusterComputeResource, cluster_name
    )
    source_dc = utils_common.get_parent_of_type(host_ref, vim.Datacenter)
    source_cluster = utils_common.get_parent_of_type(host_ref, vim.ClusterComputeResource)
    target_dc = utils_common.get_parent_of_type(cluster_ref, vim.Datacenter)
    # Cross-datacenter moves are not supported.
    if source_dc != target_dc:
        raise salt.exceptions.VMwareApiError("Cluster has to be in the same datacenter")
    task = cluster_ref.MoveInto_Task([host_ref])
    utils_common.wait_for_task(task, cluster_name, "move host task")
    return f"moved {host} from {source_cluster.name} to {cluster_ref.name}"
def remove_host(host, service_instance):
    """
    Removes host from vCenter instance.

    Returns a confirmation message on success.

    host
        Name of ESXi instance in vCenter.
    service_instance
        The Service Instance Object from which to obtain host.
    """
    host_ref = utils_common.get_mor_by_property(service_instance, vim.HostSystem, host)
    task = host_ref.Destroy_Task()
    utils_common.wait_for_task(task, host, "destroy host task")
    return f"removed host {host}"
def _format_ssl_thumbprint(number):
"""
Formats ssl cert number
number
Number to be formatted into ssl thumbprint
"""
string = str(number)
return ":".join(a + b for a, b in zip(string[::2], string[1::2]))
def _get_host_thumbprint(ip, verify_host_cert=True):
    """
    Returns the host's SHA1 SSL thumbprint (colon-separated hex).

    ip
        IP address of host; the TLS connection is made to port 443.
    verify_host_cert
        When True, a default SSL context validates the peer certificate
        (CA signature and hostname). When False, a bare SSLContext is used,
        which performs no certificate verification.
    """
    # A no-argument SSLContext does not verify the peer certificate.
    ctx = ssl.SSLContext()
    if verify_host_cert:
        ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)
    with socket.create_connection((ip, 443)) as _socket:
        _socket.settimeout(1)
        with ctx.wrap_socket(_socket, server_hostname=ip) as wrappedSocket:
            # getpeercert(True) returns the DER-encoded certificate bytes.
            cert = wrappedSocket.getpeercert(True)
            sha1 = hashlib.sha1(cert).hexdigest()
            response = _format_ssl_thumbprint(sha1)
            return response
def add_host(
    host,
    root_user,
    password,
    cluster_name,
    datacenter_name,
    verify_host_cert,
    connect,
    service_instance,
):
    """
    Adds host to a cluster in a vCenter instance.

    Returns connection state of host.

    host
        IP address or hostname of ESXI instance.
    root_user
        Username with root privilege to ESXi instance.
    password
        Password to root user.
    cluster_name
        Name of cluster ESXi host is being added to.
    datacenter_name
        Datacenter that contains cluster that ESXi instance is being added to.
    verify_host_cert
        Validates the host's SSL certificate is signed by a CA, and that the hostname in the certificate matches the host.
    connect
        Specifies whether host should be connected after being added.
    service_instance
        The Service Instance Object to place host on.
    """
    dc_ref = utils_common.get_datacenter(service_instance, datacenter_name)
    cluster_ref = utils_cluster.get_cluster(dc_ref, cluster_name)
    connect_spec = vim.host.ConnectSpec()
    # vCenter requires the host's SSL thumbprint in the connect spec to
    # authorize the add operation.
    connect_spec.sslThumbprint = _get_host_thumbprint(host, verify_host_cert)
    connect_spec.hostName = host
    connect_spec.userName = root_user
    connect_spec.password = password
    task = cluster_ref.AddHost_Task(connect_spec, connect)
    host_ref = utils_common.wait_for_task(task, host, "add host task")
    return host_ref.summary.runtime.connectionState
def get_host(host, service_instance):
    """
    Returns the vim.HostSystem managed object whose name matches ``host``.

    host
        Name of ESXi instance in vCenter.
    service_instance
        The Service Instance Object from which to obtain the host.
    """
    return utils_common.get_mor_by_property(service_instance, vim.HostSystem, host)
import logging
import salt.exceptions
import saltext.vmware.utils.common as utils_common
import saltext.vmware.utils.datacenter as utils_datacenter
# pylint: disable=no-name-in-module
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
log = logging.getLogger(__name__)
def get_clusters(service_instance, datacenter_name=None, cluster_name=None):
    """
    Return cluster managed objects in a vCenter, optionally filtered.

    service_instance
        The Service Instance Object from which to obtain cluster.
    datacenter_name
        (Optional) Datacenter name to filter by.
    cluster_name
        (Optional) Exact cluster name to filter by. Requires datacenter_name.
    """
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            "datacenter_name is required when looking up by cluster_name"
        )
    candidates = utils_common.get_mors_with_properties(
        service_instance, vim.ClusterComputeResource, property_list=["name"]
    )
    matches = []
    for candidate in candidates:
        if cluster_name and candidate["name"] != cluster_name:
            continue
        if datacenter_name:
            # Keep only clusters whose owning datacenter matches.
            parent_dc = utils_common.get_parent_of_type(candidate["object"], vim.Datacenter)
            if parent_dc.name != datacenter_name:
                continue
        matches.append(candidate["object"])
    return matches
def get_cluster(dc_ref, cluster):
    """
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference
    cluster
        The name of the cluster to be retrieved

    Raises salt.exceptions.VMwareObjectRetrievalError when no cluster with
    that name exists in the datacenter.
    """
    dc_name = utils_common.get_managed_object_name(dc_ref)
    log.trace("Retrieving cluster '%s' from datacenter '%s'", cluster, dc_name)
    si = utils_common.get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Traverse datacenter -> hostFolder -> childEntity so only clusters under
    # this datacenter's host folder are collected.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path="hostFolder",
        skip=True,
        type=vim.Datacenter,
        selectSet=[
            vmodl.query.PropertyCollector.TraversalSpec(
                path="childEntity", skip=False, type=vim.Folder
            )
        ],
    )
    items = [
        i["object"]
        for i in utils_common.get_mors_with_properties(
            si,
            vim.ClusterComputeResource,
            container_ref=dc_ref,
            property_list=["name"],
            traversal_spec=traversal_spec,
        )
        if i["name"] == cluster
    ]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            "Cluster '{}' was not found in datacenter " "'{}'".format(cluster, dc_name)
        )
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    """
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.
    cluster_name
        The cluster name.
    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    runtime faults.
    """
    dc_name = utils_common.get_managed_object_name(dc_ref)
    log.trace("Creating cluster '%s' in datacenter '%s'", cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        # Translate pyVmomi faults into salt exceptions so callers do not
        # need to depend on pyVmomi types.
        raise salt.exceptions.VMwareApiError(
            "Not enough permissions. Required privilege: " "{}".format(exc.privilegeId)
        )
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    """
    Updates a cluster in a datacenter.

    cluster_ref
        The cluster reference.
    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    runtime faults. Blocks until the reconfigure task completes.
    """
    cluster_name = utils_common.get_managed_object_name(cluster_ref)
    log.trace("Updating cluster '%s'", cluster_name)
    try:
        # modify=True applies the spec incrementally instead of replacing
        # the whole configuration.
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec, modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            "Not enough permissions. Required privilege: " "{}".format(exc.privilegeId)
        )
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    utils_common.wait_for_task(task, cluster_name, "ClusterUpdateTask")
def delete_cluster(service_instance, cluster_name, datacenter_name):
    """
    Deletes a cluster from a datacenter.

    service_instance
        The Service Instance Object
    cluster_name
        The name of the cluster to delete
    datacenter_name
        The datacenter name to which the cluster belongs

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    runtime faults. Blocks until the destroy task completes.
    """
    # Removed an unused retrieval of the root folder that was never used.
    log.trace("Deleting cluster '%s' in '%s'", cluster_name, datacenter_name)
    try:
        dc_obj = utils_datacenter.get_datacenter(service_instance, datacenter_name)
        cluster_obj = get_cluster(dc_ref=dc_obj, cluster=cluster_name)
        task = cluster_obj.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            "Not enough permissions. Required privilege: {}".format(exc.privilegeId)
        )
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    utils_common.wait_for_task(task, cluster_name, "DeleteClusterTask")
def list_clusters(service_instance):
    """
    Returns a list of clusters (vim.ClusterComputeResource) associated with a
    given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    """
    return utils_common.list_objects(service_instance, vim.ClusterComputeResource)
def create_drs_rule(name, affinity, vm_refs, enabled, mandatory, cluster_ref):
    """
    Create a virtual machine to virtual machine affinity or anti affinity DRS rule.

    name
        The name of the rule.
    affinity
        (boolean) True creates an affinity rule, False an anti-affinity rule.
    vm_refs
        Array of virtual machines associated with DRS rule.
    enabled
        (boolean) Enable the DRS rule being created.
    mandatory
        (boolean) Sets whether the rule being created is mandatory.
    cluster_ref
        Reference to cluster DRS rule is being created on.
    """
    # Pick the rule-spec class according to the requested rule flavor.
    rule_info = vim.cluster.AffinityRuleSpec() if affinity else vim.cluster.AntiAffinityRuleSpec()
    rule_info.name = name
    rule_info.vm = vm_refs
    rule_info.enabled = enabled
    rule_info.mandatory = mandatory
    rule_change = vim.cluster.RuleSpec(info=rule_info, operation="add")
    reconfig_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_change])
    task = cluster_ref.ReconfigureEx(reconfig_spec, modify=True)
    utils_common.wait_for_task(task, "Cluster", "Create DRS rule Task")
def update_drs_rule(rule_ref, vm_refs, enabled, mandatory, cluster_ref):
    """
    Update a virtual machine to virtual machine affinity or anti affinity DRS rule.

    rule_ref
        Reference to the existing rule (retrieved by name).
    vm_refs
        Array of virtual machines associated with DRS rule.
    enabled
        (boolean) Enable the DRS rule being updated.
    mandatory
        (boolean) Sets whether the rule being updated is mandatory.
    cluster_ref
        Reference to cluster the DRS rule is being updated on.
    """
    rule_ref.vm = vm_refs
    rule_ref.enabled = enabled
    rule_ref.mandatory = mandatory
    # operation="edit" reconfigures the existing rule in place.
    spec = vim.cluster.RuleSpec(info=rule_ref, operation="edit")
    config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[spec])
    task = cluster_ref.ReconfigureEx(config_spec, modify=True)
    # BUG FIX: the task label previously read "Create DRS rule Task", a
    # copy/paste leftover from create_drs_rule; it is only used for
    # progress/error reporting by wait_for_task.
    utils_common.wait_for_task(task, "Cluster", "Update DRS rule Task")
def drs_rule_info(rule):
    """
    Returns info on a DRS rule as a plain dictionary.

    rule
        Reference to DRS rule.

    Raises salt.exceptions.VMwareApiError for unknown rule types.
    """
    rule_info = {
        "name": rule.name,
        "uuid": rule.ruleUuid,
        "enabled": rule.enabled,
        "mandatory": rule.mandatory,
        "key": rule.key,
        "in_compliance": rule.inCompliance,
    }
    # isinstance replaces the previous exact type() comparisons; the two VM
    # affinity spec classes do not inherit from one another, so behavior is
    # unchanged for these types.
    if isinstance(rule, (vim.cluster.AntiAffinityRuleSpec, vim.cluster.AffinityRuleSpec)):
        rule_info["type"] = "vm_affinity_rule"
        rule_info["vms"] = [vm.name for vm in rule.vm]
        rule_info["affinity"] = check_affinity(rule)
    elif isinstance(rule, vim.cluster.VmHostRuleInfo):
        rule_info["type"] = "vm_host_rule"
        rule_info["vm_group_name"] = rule.vmGroupName
        rule_info["affine_host_group_name"] = rule.affineHostGroupName
        rule_info["anti_affine_host_group_name"] = rule.antiAffineHostGroupName
    elif isinstance(rule, vim.cluster.DependencyRuleInfo):
        rule_info["type"] = "dependency_rule"
        rule_info["vm_group"] = rule.vmGroup
        rule_info["depends_on_vm_group"] = rule.dependsOnVmGroup
    else:
        # BUG FIX: the exception was previously given a *set* containing the
        # message ({f"..."}); pass the message string itself.
        raise salt.exceptions.VMwareApiError(f"Unknown affinity rule type {type(rule)}")
    return rule_info
def check_affinity(rule):
    """
    Returns True if rule is Affine, or False if rule is AntiAffine.

    rule
        Reference to DRS rule.

    Raises Exception for rule types that carry no affinity semantics.
    """
    # isinstance replaces the previous exact type() comparisons; the two
    # classes are siblings (neither inherits from the other), so behavior
    # is unchanged.
    if isinstance(rule, vim.cluster.AntiAffinityRuleSpec):
        return False
    if isinstance(rule, vim.cluster.AffinityRuleSpec):
        return True
    raise Exception(f"Rule type {type(rule)} has no affinity.")
from . import *
import logging

# Package version string.
__version__='2.0.0dev0'

# Route Python warnings through the logging system so they share handlers
# with the package loggers configured below.
logging.captureWarnings(True)

# Attach a plain-message INFO-level console handler to the captured-warnings
# logger and the package's default logger.
for logName in ['py.warnings','salt3.default']:
    logger=logging.getLogger(logName)
    logger.setLevel(logging.INFO)
    noFrills=logging.Formatter('%(message)s')
    console=logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(noFrills)
    logger.addHandler(console)
def test(package=None, test_path=None, args=None, plugins=None,
         verbose=False, pastebin=None, remote_data=False, pep8=False,
         pdb=False, coverage=False, open_files=False, **kwargs):
    """
    Copied over from sncosmo v1.6 (http://sncosmo.readthedocs.org)
    Run the tests using py.test. A proper set of arguments is constructed and
    passed to `pytest.main`.

    Parameters
    ----------
    package : str, optional
        The name of a specific package to test, e.g. 'io.fits' or 'utils'.
        If nothing is specified all default tests are run.
    test_path : str, optional
        Specify location to test by path. May be a single file or
        directory. Must be specified absolutely or relative to the
        calling directory.
    args : str, optional
        Additional arguments to be passed to `pytest.main` in the `args`
        keyword argument.
    plugins : list, optional
        Plugins to be passed to `pytest.main` in the `plugins` keyword
        argument.
    verbose : bool, optional
        Convenience option to turn on verbose output from py.test. Passing
        True is the same as specifying `-v` in `args`.
    pastebin : {'failed','all',None}, optional
        Convenience option for turning on py.test pastebin output. Set to
        'failed' to upload info for failed tests, or 'all' to upload info
        for all tests.
    remote_data : bool, optional
        Controls whether to run tests marked with @remote_data. These
        tests use online data and are not run by default. Set to True to
        run these tests.
    pep8 : bool, optional
        Turn on PEP8 checking via the pytest-pep8 plugin and disable normal
        tests. Same as specifying `--pep8 -k pep8` in `args`.
    pdb : bool, optional
        Turn on PDB post-mortem analysis for failing tests. Same as
        specifying `--pdb` in `args`.
    coverage : bool, optional
        Generate a test coverage report. The result will be placed in
        the directory htmlcov.
    open_files : bool, optional
        Fail when any tests leave files open. Off by default, because
        this adds extra run time to the test suite. Works only on
        platforms with a working ``lsof`` command.
    parallel : int, optional
        When provided, run the tests in parallel on the specified
        number of CPUs. If parallel is negative, it will use the all
        the cores on the machine. Requires the
        `pytest-xdist <https://pypi.python.org/pypi/pytest-xdist>`_ plugin
        installed. Only available when using Astropy 0.3 or later.
    kwargs
        Any additional keywords passed into this function will be passed
        on to the astropy test runner. This allows use of test-related
        functionality implemented in later versions of astropy without
        explicitly updating the package template.

    See Also
    --------
    pytest.main : py.test function wrapped by `run_tests`.
    """
    # Imported lazily so astropy is only required when tests are actually run.
    import os
    from astropy.tests.helper import TestRunner
    runner = TestRunner(os.path.dirname(__file__))
    return runner.run_tests(
        package=package, test_path=test_path, args=args,
        plugins=plugins, verbose=verbose, pastebin=pastebin,
        remote_data=remote_data, pep8=pep8, pdb=pdb,
        coverage=coverage, open_files=open_files, **kwargs)
import matplotlib as mpl
mpl.use('agg')
import pylab as plt
import numpy as np
from saltshaker.util.txtobj import txtobj
from saltshaker.util import getmu
from functools import partial
from scipy.stats import binned_statistic
from astropy.cosmology import Planck15 as planck
from astropy.cosmology import FlatLambdaCDM
import astropy.table as at
# Registry of plotting functions, keyed by function __name__.
__validfunctions__ = dict()


def validfunction(func):
    """Decorator to register a given function as a plotting function.

    The function is stored in ``__validfunctions__`` under its ``__name__``
    and returned unchanged. (The parameter was previously named
    ``validfunction``, shadowing the decorator itself; decorators are
    applied positionally, so the rename does not affect callers.)
    """
    __validfunctions__[func.__name__] = func
    return func
class ValidPlots:
    """Base class for a collection of validation plots.

    On construction, every function registered in the module-level
    ``__validfunctions__`` registry whose qualified name belongs to this
    (sub)class is bound to the instance, so ``run`` can invoke them all.
    """

    def __init__(self):
        self.validfunctions = {}
        clsname = type(self).__name__
        for funcname, func in __validfunctions__.items():
            # keep only functions defined on this particular subclass
            if func.__qualname__.split('.')[0] == clsname:
                self.validfunctions[funcname] = partial(func, self)

    def input(self, inputfile=None):
        """Record the path of the input (FITRES/ASCII) file to plot from."""
        self.inputfile = inputfile

    def output(self, outputdir=None, prefix=''):
        """Record the output directory (slash-terminated) and file prefix."""
        self.outputdir = outputdir if outputdir.endswith('/') else '%s/' % outputdir
        self.prefix = prefix

    def run(self, *args):
        """Invoke every registered validation-plot function with *args."""
        for func in self.validfunctions.values():
            func(*args)
class lcfitting_validplots(ValidPlots):
    """Validation plots for light-curve fitting results (FITRES files)."""

    @validfunction
    def simvfit(self):
        """Histogram fitted-minus-simulated c, x1 and mu.

        Prints a message and returns without plotting if the FITRES file
        is not from a SALT2-like simulation (no SIM_* columns).
        """
        plt.rcParams['figure.figsize'] = (12,4)
        plt.subplots_adjust(left=None, bottom=0.2, right=None, top=None, wspace=0, hspace=0)
        fr = txtobj(self.inputfile,fitresheader=True)
        # simulated quantities are required to form residuals
        if 'SIM_mB' not in fr.__dict__.keys():
            print('not a simulation! skipping simvfit')
            return
        elif 'SIM_x1' not in fr.__dict__.keys() or 'SIM_c' not in fr.__dict__.keys():
            print('not a salt2-like simulation! skipping simvfit')
            return
        ax1,ax2,ax3 = plt.subplot(131),plt.subplot(132),plt.subplot(133)
        cbins = np.linspace(-0.3,0.3,20)
        x1bins = np.linspace(-1,1,20)
        mubins = np.linspace(-1,1,20)
        # Tripp-like distance estimate with fixed nuisance parameters
        # (alpha=0.14, beta=3.1, M=-19.36)
        mu = fr.mB + 0.14*fr.x1 - 3.1*fr.c + 19.36
        SIM_mu = fr.SIM_mB + 0.14*fr.SIM_x1 - 3.1*fr.SIM_c + 19.36
        ax1.hist(fr.c-fr.SIM_c,bins=cbins)
        ax1.set_xlabel(r'$c - c_{\mathrm{sim}}$',fontsize=15)
        ax1.set_ylabel(r'N$_{\mathrm{SNe}}$',fontsize=15)
        ax2.hist(fr.x1-fr.SIM_x1,bins=x1bins)
        ax2.set_xlabel(r'$x_1 - x_{1,\mathrm{sim}}$',fontsize=15)
        ax3.hist(mu-SIM_mu,bins=mubins)
        ax3.set_xlabel(r'$\mu - \mu_{\mathrm{sim}}$',fontsize=15)
        # Hide the middle panel's y tick labels (panels abut, wspace=0).
        # BUGFIX: the original called ax2.set_ylabel([]), which draws the
        # literal text "[]" as an axis label instead of hiding tick labels.
        ax2.set_yticklabels([])
        ax3.yaxis.tick_right()
        plt.savefig('%s%s_simvfit.png'%(self.outputdir,self.prefix))
        return

    @validfunction
    def hubbleplot(self):
        """Plot Hubble residuals vs. redshift with binned averages."""
        plt.rcParams['figure.figsize'] = (12,4)
        plt.subplots_adjust(
            left=None, bottom=0.2, right=None, top=None, wspace=0, hspace=0)
        fr = txtobj(self.inputfile,fitresheader=True)
        ax = plt.axes()
        fr = getmu.getmu(fr)

        def errfnc(x):
            # standard error of the mean within one redshift bin
            return(np.std(x)/np.sqrt(len(x)))

        zbins = np.logspace(np.log10(0.01),np.log10(1.0),25)
        mubins = binned_statistic(
            fr.zCMB,fr.mures,bins=zbins,statistic='mean').statistic
        # NOTE(review): the binned errors use fr.mu while the binned means
        # use fr.mures -- confirm this asymmetry is intended
        mubinerr = binned_statistic(
            fr.zCMB,fr.mu,bins=zbins,statistic=errfnc).statistic
        ax.errorbar(fr.zCMB,fr.mures,yerr=fr.muerr,alpha=0.2,fmt='o')
        ax.errorbar(
            (zbins[1:]+zbins[:-1])/2.,mubins,yerr=mubinerr,fmt='o-')
        ax.axhline(0,color='k',lw=2)
        ax.set_xscale('log')
        ax.xaxis.set_major_formatter(plt.NullFormatter())
        ax.xaxis.set_minor_formatter(plt.NullFormatter())
        # NOTE(review): this label text is overwritten below; kept only for
        # its labelpad=0 side effect
        ax.set_ylabel('SNe',fontsize=11,labelpad=0)
        ax.set_xlim([0.01,1.0])
        ax.xaxis.set_ticks([0.01,0.02,0.05,0.1,0.2,0.3,0.5,1.0])
        ax.xaxis.set_ticklabels(['0.01','0.02','0.05','0.1','0.2','0.3','0.5','1.0'])
        ax.set_xlabel('$z_{CMB}$',fontsize=15)
        ax.set_ylabel(r'$\mu - \mu_{\Lambda CDM}$',fontsize=15)
        plt.savefig('%s%s_hubble.png'%(self.outputdir,self.prefix))
        return
class getmu_validplots(ValidPlots):
    """Validation plots for BBC/getmu output (distances and nuisance parameters)."""

    @validfunction
    def hubble(self):
        """Plot Hubble residuals relative to a fixed flat-LCDM cosmology."""
        plt.clf()
        plt.rcParams['figure.figsize'] = (12,4)
        plt.subplots_adjust(
            left=None, bottom=0.2, right=None, top=None, wspace=0, hspace=0)
        fr = txtobj(self.inputfile,fitresheader=True)
        ax = plt.axes()
        # reference cosmology: flat LCDM with Planck-like Om0
        cosmo = FlatLambdaCDM(H0=70, Om0=0.315, Tcmb0=planck.Tcmb0)
        fr.MURES = fr.MU - cosmo.distmod(fr.zCMB).value

        def errfnc(x):
            # standard error of the mean within one redshift bin
            return(np.std(x)/np.sqrt(len(x)))

        zbins = np.logspace(np.log10(0.01),np.log10(1.0),25)
        mubins = binned_statistic(
            fr.zCMB,fr.MURES,bins=zbins,statistic='mean').statistic
        # NOTE(review): bin errors use fr.MU while bin means use fr.MURES --
        # confirm this asymmetry is intended
        mubinerr = binned_statistic(
            fr.zCMB,fr.MU,bins=zbins,statistic=errfnc).statistic
        ax.errorbar(fr.zCMB,fr.MURES,yerr=fr.MUERR,alpha=0.2,fmt='o')
        ax.errorbar(
            (zbins[1:]+zbins[:-1])/2.,mubins,yerr=mubinerr,fmt='o-')
        ax.axhline(0,color='k',lw=2)
        ax.set_xscale('log')
        ax.xaxis.set_major_formatter(plt.NullFormatter())
        ax.xaxis.set_minor_formatter(plt.NullFormatter())
        # NOTE(review): this label text is overwritten below; kept only for
        # its labelpad=0 side effect
        ax.set_ylabel('SNe',fontsize=11,labelpad=0)
        ax.set_xlim([0.01,1.0])
        ax.xaxis.set_ticks([0.01,0.02,0.05,0.1,0.2,0.3,0.5,1.0])
        ax.xaxis.set_ticklabels(['0.01','0.02','0.05','0.1','0.2','0.3','0.5','1.0'])
        ax.set_xlabel('$z_{CMB}$',fontsize=15)
        ax.set_ylabel(r'$\mu - \mu_{\Lambda CDM}$',fontsize=15)
        plt.savefig('%s%s_BBC_hubble_prelim.png'%(self.outputdir,self.prefix))

    @validfunction
    def nuisancebias(self):
        """Compare fitted nuisance parameters (alpha, beta, sigma_int) to simulation."""
        sigint,alpha,beta = [],[],[]
        # NOTE(review): alphaerr/betaerr are collected but never used below
        alphaerr,betaerr = [],[]
        # parse the commented header lines of the BBC output for the
        # per-sample nuisance-parameter fits
        with open(self.inputfile) as fin:
            for line in fin:
                if line.startswith('#') and 'sigint' in line and '=' in line:
                    sigint.append(float(line.split()[3]))
                elif line.startswith('#') and 'alpha0' in line and '=' in line:
                    alpha.append(float(line.split()[3]))
                    alphaerr.append(float(line.split()[5]))
                elif line.startswith('#') and 'beta0' in line and '=' in line:
                    beta.append(float(line.split()[3]))
                    betaerr.append(float(line.split()[5]))
        if np.any(np.isnan(sigint+alpha+beta)):
            return
        fr = txtobj(self.inputfile,fitresheader=True)
        # BUGFIX: the original guard used 'and', so a file containing only
        # one of SIM_alpha/SIM_beta fell through to the else branch and
        # raised AttributeError; use sentinels unless BOTH are present.
        if 'SIM_alpha' not in fr.__dict__.keys() or 'SIM_beta' not in fr.__dict__.keys():
            sim_alpha = -99.
            sim_beta = -99.
        else:
            sim_alpha = fr.SIM_alpha[0]
            sim_beta = fr.SIM_beta[0]
        plt.clf()
        ax = plt.axes()
        ax.set_ylabel('Nuisance Parameters',fontsize=15)
        ax.xaxis.set_ticks([1,2,3])
        ax.xaxis.set_ticklabels(['alpha','beta',r'$\sigma_{\mathrm{int}}$'],rotation=30)
        ax.errorbar(1,np.mean(alpha)-sim_alpha,yerr=np.sqrt(np.std(alpha)**2/len(alpha)),fmt='o',color='C0',label='fit')
        ax.errorbar(2,np.mean(beta)-sim_beta,yerr=np.sqrt(np.std(beta)**2/len(beta)),fmt='o',color='C0')
        # 0.1 is the assumed simulated intrinsic scatter
        ax.errorbar(3,np.mean(sigint)-0.1,fmt='o',color='C0')
        ax.axhline(0,color='k',lw=2)
        ax.text(0.17,0.9,r"""$\alpha_{sim} = %.3f$
        $\alpha_{fit} = %.3f \pm %.3f$"""%(
            sim_alpha,np.mean(alpha),np.sqrt(np.std(alpha)**2/len(alpha))),transform=ax.transAxes,ha='center',va='center')
        ax.text(0.5,0.9,r"""$\beta_{sim} = %.3f$
        $\beta_{fit} = %.3f \pm %.3f$"""%(
            sim_beta,np.mean(beta),np.sqrt(np.std(beta)**2/len(beta))),transform=ax.transAxes,ha='center',va='center')
        ax.text(0.83,0.9,r"""$\sigma_{int} = %.3f$"""%(
            np.mean(sigint)),transform=ax.transAxes,ha='center',va='center')
        ax.set_xlim([0.5,3.5])
        plt.savefig('%s%s_nuisancebias.png'%(self.outputdir,self.prefix))
class cosmofit_validplots(ValidPlots):
    """Validation plots for cosmology-fitting output."""

    @validfunction
    def cosmopar(self):
        """Plot mean w and Omega_M offsets relative to Planck 2015 values."""
        # plot w, Om numbers and biases
        # could make a tex table also
        Om_Planck = 0.315
        w_Planck = -1
        # input is an ASCII table with one row per cosmology fit,
        # containing at least 'w' and 'OM' columns
        data = at.Table.read(self.inputfile,format='ascii')
        print(data)  # NOTE(review): debug output left in; consider logging instead
        plt.clf()
        ax = plt.axes()
        ax.set_ylabel('Cosmo Parameters',fontsize=15)
        ax.xaxis.set_ticks([1,2])
        ax.xaxis.set_ticklabels(['$w$','$\Omega_M$'],rotation=30)
        # error bars are the standard error of the mean over fits
        ax.errorbar(1,np.mean(data['w'])-w_Planck,yerr=np.sqrt(np.std(data['w'])**2/len(data['w'])),fmt='o',color='C0',label='fit')
        ax.errorbar(2,np.mean(data['OM'])-Om_Planck,yerr=np.sqrt(np.std(data['OM'])**2/len(data['OM'])),fmt='o',color='C0')
        ax.axhline(0,color='k',lw=2)
        ax.text(0.17,0.9,r"""$w = %.3f \pm %.3f$"""%(
            np.mean(data['w']),np.sqrt(np.std(data['w'])**2/len(data['w']))),transform=ax.transAxes,ha='center',va='center')
        ax.text(0.5,0.9,r"""$\Omega_M = %.3f \pm %.3f$"""%(
            np.mean(data['OM']),np.sqrt(np.std(data['OM'])**2/len(data['OM']))),transform=ax.transAxes,ha='center',va='center')
        ax.set_xlim([0.5,2.5])
        plt.savefig('%s%s_cosmopar.png'%(self.outputdir,self.prefix))
import os
import numpy as np
from saltshaker.util import snana
from saltshaker.util.estimate_tpk_bazin import estimate_tpk_bazin
from astropy.io import fits
from saltshaker.initfiles import init_rootdir
from saltshaker.data import data_rootdir
from astroquery.irsa_dust import IrsaDust
from astropy.coordinates import SkyCoord
import astropy.units as u
import warnings
from time import time
import scipy.stats as ss
import astropy.table as at
import logging
import abc
from copy import deepcopy
log=logging.getLogger(__name__)
class SNDataReadError(ValueError):
    """Raised when a supernova data file cannot be parsed into training data."""
    pass
class BreakLoopException(RuntimeError):
    """Raised internally to stop reading once the requested number of SNe is reached."""
    pass
def checksize(a, b):
    """Raise ValueError unless arrays *a* and *b* have the same number of elements.

    Uses an explicit exception rather than ``assert`` so the consistency
    check still runs when Python is invoked with ``-O``.
    """
    if a.size != b.size:
        raise ValueError('array size mismatch: %d != %d' % (a.size, b.size))
class SALTtrainingdata(metaclass=abc.ABCMeta):
    """Abstract base for SALT training-data containers.

    Subclasses declare ``__listdatakeys__``, naming the array-valued
    attributes that should be indexed when a selection mask is applied.
    """
    __slots__ = []

    @property
    @abc.abstractmethod
    def __listdatakeys__(self):
        """Names of the array attributes that clip() should index."""
        pass

    def clip(self, clipcriterion):
        """Return a deep copy with each data array indexed by *clipcriterion*."""
        clipped = deepcopy(self)
        for attrname in self.__listdatakeys__:
            setattr(clipped, attrname, getattr(self, attrname)[clipcriterion])
        return clipped
class SALTtraininglightcurve(SALTtrainingdata):
    """A single-band light curve prepared for SALT training.

    Observations are selected from the parent SNANA object by filter,
    sorted by MJD, and stored along with rest-frame phases.
    """
    __slots__ = ['mjd','tobs','phase','fluxcal','fluxcalerr','filt']

    # attributes that clip() should index when applying a selection mask
    __listdatakeys__ = {'tobs','mjd','phase','fluxcal','fluxcalerr'}

    def __init__(self, z, tpk_guess, flt, sn):
        mask = (sn.FLT == flt)
        assert(mask.sum() > 0)
        order = np.argsort(sn.MJD[mask])
        self.mjd = sn.MJD[mask][order]
        # observer-frame time and rest-frame phase relative to peak
        self.tobs = (self.mjd - tpk_guess)
        self.phase = self.tobs / (1 + z)
        self.fluxcal = sn.FLUXCAL[mask][order]
        self.fluxcalerr = sn.FLUXCALERR[mask][order]
        self.filt = flt
        # sanity-check that all per-observation arrays stayed aligned
        checksize(self.tobs, self.mjd)
        checksize(self.mjd, self.fluxcal)
        checksize(self.mjd, self.fluxcalerr)

    def __len__(self):
        return len(self.tobs)
class SALTtrainingspectrum(SALTtrainingdata):
    """A single observed spectrum prepared for SALT training."""
    __slots__=['flux', 'phase', 'wavelength', 'fluxerr', 'tobs','restwavelength','mjd']
    def __init__(self,snanaspec,z,tpk_guess,binspecres=None ):
        # snanaspec : one spectrum dictionary from an SNANA file
        # z         : heliocentric redshift
        # tpk_guess : estimated MJD of peak brightness
        # binspecres: if given, rebin to this rest-frame resolution
        #             (Angstroms) using inverse-variance weighting
        m=snanaspec['SPECTRUM_MJD']
        if snanaspec['FLAM'].size==0:
            raise SNDataReadError(f'Spectrum has no observations')
        # wavelength may be given as bin centers (LAMAVG) or edges (LAMMIN/LAMMAX)
        if 'LAMAVG' in snanaspec:
            wavelength = snanaspec['LAMAVG']
        elif 'LAMMIN' in snanaspec and 'LAMMAX' in snanaspec:
            wavelength = (snanaspec['LAMMIN']+snanaspec['LAMMAX'])/2
        else:
            raise SNDataReadError('couldn\'t find wavelength data')
        self.wavelength=wavelength
        self.fluxerr=snanaspec['FLAMERR']
        self.flux=snanaspec['FLAM']
        self.tobs=m -tpk_guess
        self.mjd=m
        self.phase=self.tobs/(1+z)
        # data-quality mask; the flag column name differs between file versions
        if 'DQ' in snanaspec:
            iGood=(snanaspec['DQ']==1)
        elif 'SPECFLAG' in snanaspec:
            iGood=(snanaspec['SPECFLAG']==1)
        else:
            iGood=np.ones(len(self),dtype=bool)
        iGood = iGood & (~np.isnan(self.flux))
        if ('DQ' in snanaspec and (snanaspec['DQ']==1).sum() == 0) or np.all(np.isnan(self.flux)):
            raise SNDataReadError('Spectrum is all marked as invalid data')
        if binspecres is not None:
            flux = self.flux[iGood]
            wavelength = self.wavelength[iGood]
            fluxerr = self.fluxerr[iGood]
            # fluxes are normalized by their maximum before weighting
            # NOTE(review): the binned fluxes below stay in these normalized
            # units (never rescaled by fluxmax) -- presumably acceptable
            # because spectra are recalibrated downstream; confirm
            fluxmax = np.max(flux)
            weights = 1/(fluxerr/fluxmax)**2.
            def weighted_avg(indices):
                """
                Return the weighted average and standard deviation.
                indices, weights -- Numpy ndarrays with the same shape.
                """
                average = np.average(flux[indices]/fluxmax, weights=weights[indices])
                # NOTE(review): 'variance' is computed but unused here; only
                # weighted_err needs it
                variance = np.average((flux[indices]/fluxmax-average)**2, weights=weights[indices]) # Fast and numerically precise
                return average
            def weighted_err(indices):
                """
                Return the weighted average and standard deviation.
                indices, weights -- Numpy ndarrays with the same shape.
                """
                average = np.average(flux[indices]/fluxmax, weights=weights[indices])
                variance = np.average((flux[indices]/fluxmax-average)**2, weights=weights[indices]) # Fast and numerically precise
                return np.sqrt(variance)
            # bin edges spaced at binspecres in the rest frame, i.e.
            # binspecres*(1+z) in the observer frame
            wavebins = np.linspace(np.min(wavelength),np.max(wavelength),int((np.max(wavelength)-np.min(wavelength))/(binspecres*(1+z))))
            binned_flux = ss.binned_statistic(wavelength,range(len(flux)),bins=wavebins,statistic=weighted_avg).statistic
            binned_fluxerr = ss.binned_statistic(wavelength,range(len(flux)),bins=wavebins,statistic=weighted_err).statistic
            # NaN != NaN, so this drops empty bins
            iGood = (binned_flux == binned_flux)
            self.flux = binned_flux[iGood]
            self.wavelength = (wavebins[1:][iGood]+wavebins[:-1][iGood])/2.
            self.fluxerr = binned_fluxerr[iGood]
        else:
            self.flux = self.flux[iGood]
            self.wavelength = self.wavelength[iGood]
            self.fluxerr = self.fluxerr[iGood]
        # error floor
        self.fluxerr = np.hypot(self.fluxerr, 5e-3*np.max(self.flux))
        self.restwavelength= self.wavelength/ (1+z)
        # sanity-check that all per-pixel arrays stayed aligned
        for key in self.__listdatakeys__:
            checksize(self.wavelength,getattr(self,key) )
    # attributes that clip() should index when applying a selection mask
    __listdatakeys__={'wavelength','flux','fluxerr','restwavelength'}
    def __len__(self):
        return len(self.wavelength)
class SALTtrainingSN:
    """All training data (light curves and spectra) for a single supernova."""
    # BUGFIX: the SIM_SALT2* names must be declared here -- __init__
    # conditionally assigns them for simulated data, and since instances
    # use __slots__ (no __dict__), undeclared attributes raise
    # AttributeError.
    __slots__=['survey', 'zHelio', 'MWEBV', 'snid', 'tpk_guess', 'salt2fitprob', 'photdata','specdata',
               'SIM_SALT2x0', 'SIM_SALT2x1', 'SIM_SALT2c']

    def __init__(self,sn,
                 estimate_tpk=False,snpar=None,
                 pkmjddict={},binspecres=None):
        """Parse an SNANA SuperNova object into SALT training data.

        sn           : snana.SuperNova instance
        estimate_tpk : if True, estimate the time of peak from a Bazin fit
                       to a blue band
        snpar        : optional table of per-SN parameters (FITPROB)
        pkmjddict    : optional {snid: peak MJD} mapping (read-only here,
                       so the mutable default is safe)
        binspecres   : optional rest-frame spectral binning (Angstroms)

        Raises SNDataReadError when required metadata are missing.
        """
        if 'FLT' not in sn.__dict__.keys():
            raise SNDataReadError('can\'t find SN filters!')
        if 'SURVEY' in sn.__dict__.keys():
            self.survey=sn.SURVEY
        else:
            raise SNDataReadError('SN %s has no SURVEY key, which is needed to find the filter transmission curves'%sn.SNID[0])
        if not 'REDSHIFT_HELIO' in sn.__dict__.keys():
            raise SNDataReadError('SN %s has no heliocentric redshift information in the header'%sn.SNID)
        if 'PEAKMJD' in sn.__dict__.keys(): sn.SEARCH_PEAKMJD = sn.PEAKMJD
        # FITS vs. ASCII format issue in the parser
        if isinstance(sn.REDSHIFT_HELIO,str): self.zHelio = float(sn.REDSHIFT_HELIO.split('+-')[0])
        else: self.zHelio = sn.REDSHIFT_HELIO
        # determine the time of peak brightness: Bazin fit, user-supplied
        # dictionary, or the header value, in that order of preference
        if estimate_tpk:
            if 'B' in sn.FLT:
                tpk,tpkmsg = estimate_tpk_bazin(
                    sn.MJD[sn.FLT == 'B'],sn.FLUXCAL[sn.FLT == 'B'],sn.FLUXCALERR[sn.FLT == 'B'],max_nfev=100000,t0=sn.SEARCH_PEAKMJD)
            elif 'g' in sn.FLT:
                tpk,tpkmsg = estimate_tpk_bazin(
                    sn.MJD[sn.FLT == 'g'],sn.FLUXCAL[sn.FLT == 'g'],sn.FLUXCALERR[sn.FLT == 'g'],max_nfev=100000,t0=sn.SEARCH_PEAKMJD)
            elif 'c' in sn.FLT:
                tpk,tpkmsg = estimate_tpk_bazin(
                    sn.MJD[sn.FLT == 'c'],sn.FLUXCAL[sn.FLT == 'c'],sn.FLUXCALERR[sn.FLT == 'c'],max_nfev=100000,t0=sn.SEARCH_PEAKMJD)
            else:
                raise SNDataReadError(f'need a blue filter to estimate tmax')
        elif len(pkmjddict.keys()):
            try:
                tpk = pkmjddict[sn.SNID]
                tpkmsg = 'success: peak MJD provided'
            except KeyError:
                tpkmsg = f'can\'t find tmax in pkmjd file'
        else:
            tpk = sn.SEARCH_PEAKMJD
            if isinstance(tpk, str):
                tpk = float(sn.SEARCH_PEAKMJD.split()[0])
            tpkmsg = 'success: peak MJD found in LC file'
        if 'success' not in tpkmsg:
            raise SNDataReadError(f'can\'t estimate t_max for SN {sn.SNID}: {tpkmsg}')
        # to allow a fitprob cut
        if snpar is not None:
            if 'FITPROB' in snpar.keys() and str(sn.SNID) in snpar['SNID']:
                fitprob = snpar['FITPROB'][str(sn.SNID) == snpar['SNID']][0]
            else:
                fitprob = -99
        else:
            fitprob = -99
        # carry through simulated SALT2 parameters when present
        if 'SIM_SALT2x0' in sn.__dict__.keys(): self.SIM_SALT2x0 = sn.SIM_SALT2x0
        if 'SIM_SALT2x1' in sn.__dict__.keys(): self.SIM_SALT2x1 = sn.SIM_SALT2x1
        if 'SIM_SALT2c' in sn.__dict__.keys(): self.SIM_SALT2c = sn.SIM_SALT2c
        #Find E(B-V) from Milky Way
        if 'MWEBV' in sn.__dict__.keys():
            # MWEBV may be a bare float or a "value +- error" string
            try: self.MWEBV = float(sn.MWEBV.split()[0])
            except (AttributeError, ValueError, IndexError): self.MWEBV= float(sn.MWEBV)
        elif 'RA' in sn.__dict__.keys() and 'DEC' in sn.__dict__.keys():
            log.warning('determining MW E(B-V) from IRSA for SN %s using RA/Dec in file'%sn.SNID)
            sc = SkyCoord(sn.RA,sn.DEC,frame="fk5",unit=u.deg)
            self.MWEBV = IrsaDust.get_query_table(sc)['ext SandF mean'][0]
        else:
            raise SNDataReadError('Could not determine E(B-V) from files. Set MWEBV keyword in input file header for SN %s'%sn.SNID)
        self.snid=sn.SNID
        self.tpk_guess=tpk
        self.salt2fitprob=fitprob
        # one SALTtraininglightcurve per filter
        self.photdata = {flt:SALTtraininglightcurve(self.zHelio,tpk_guess= self.tpk_guess,flt=flt, sn=sn) for flt in np.unique(sn.FLT)}
        # explicit check instead of assert so it survives python -O
        if not len(self.photdata):
            raise SNDataReadError(f'No lightcurves for SN {sn.SNID}')
        self.specdata = {}
        if 'SPECTRA' in sn.__dict__:
            for speccount,k in enumerate(sn.SPECTRA):
                try:
                    self.specdata[speccount]=SALTtrainingspectrum(sn.SPECTRA[k],self.zHelio,self.tpk_guess,binspecres=binspecres)
                except SNDataReadError as e:
                    if hasattr(e,'message'):
                        log.warning(f'{e.message}, skipping spectrum {k} for SN {self.snid}')
                    else:
                        log.warning(f'DataReadError, skipping spectrum {k} for SN {self.snid}')

    @property
    def num_lc(self):
        """Number of light curves (filters) with photometry."""
        return len(self.photdata)
    @property
    def num_photobs(self):
        """Total number of photometric observations across all filters."""
        return sum([len(self.photdata[filt]) for filt in self.photdata])
    @property
    def num_specobs(self):
        """Total number of spectral flux measurements across all spectra."""
        return sum([len(self.specdata[key]) for key in self.specdata])
    @property
    def num_spec(self):
        """Number of spectra."""
        return len(self.specdata)
    @property
    def filt(self):
        """List of filter names with photometry."""
        return list(self.photdata.keys())
def rdkcor(surveylist,options):
    """Read SNANA kcor (calibration) FITS files for each survey.

    For every survey in *surveylist*, ``options.<survey>_kcorfile`` and
    ``options.<survey>_subsurveylist`` are read; the result maps
    'SURVEY(SUBSURVEY)' keys to calibration info (primary reference SEDs,
    filter transmissions, zeropoints), plus a 'default' entry holding
    Bessell B/V curves and a flat-nu AB reference spectrum.  Optional
    calibration shifts from ``options.calibrationshiftfile`` are applied
    in place.
    """
    kcordict = {}
    for survey in surveylist:
        kcorfile = options.__dict__['%s_kcorfile'%survey]
        subsurveys = options.__dict__['%s_subsurveylist'%survey].split(',')
        kcorfile = os.path.expandvars(kcorfile)
        # fall back to the packaged kcor directory when the path is missing
        if not os.path.exists(kcorfile):
            log.info('kcor file %s does not exist. Checking %s/kcor'%(kcorfile,data_rootdir))
            kcorfile = '%s/kcor/%s'%(data_rootdir,kcorfile)
            if not os.path.exists(kcorfile):
                raise RuntimeError('kcor file %s does not exist'%kcorfile)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            try:
                # standard SNANA kcor layout: ext 1 zeropoint offsets,
                # ext 2 SN SED template, ext 5 filter transmissions,
                # ext 6 primary reference SEDs
                hdu = fits.open(kcorfile)
                zpoff = hdu[1].data
                snsed = hdu[2].data
                filtertrans = hdu[5].data
                primarysed = hdu[6].data
                hdu.close()
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt;
                # consider narrowing to Exception
                raise RuntimeError('kcor file format is non-standard for kcor file %s'%kcorfile)
        for subsurvey in subsurveys:
            # an empty subsurvey name means the key is just the survey name
            kcorkey = '%s(%s)'%(survey,subsurvey)
            if not subsurvey: kcorkey = survey[:]
            kcordict[kcorkey] = {}
            kcordict[kcorkey]['primarywave'] = np.array(primarysed['wavelength (A)'])
            kcordict[kcorkey]['snflux'] = np.array(snsed['SN Flux (erg/s/cm^2/A)'])
            # whichever primary reference spectra are present in the file
            if 'AB' in primarysed.names:
                kcordict[kcorkey]['AB'] = np.array(primarysed['AB'])
            if 'Vega' in primarysed.names:
                kcordict[kcorkey]['Vega'] = np.array(primarysed['Vega'])
            if 'VEGA' in primarysed.names:
                kcordict[kcorkey]['Vega'] = np.array(primarysed['VEGA'])
            if 'BD17' in primarysed.names:
                kcordict[kcorkey]['BD17'] = np.array(primarysed['BD17'])
            for filt in zpoff['Filter Name']:
                #log.warning('Using only the last character of kcor-provided filter names')
                if not options.filters_use_lastchar_only:
                    internalfiltname=filt[:] #[-1]
                else:
                    internalfiltname=filt[-1]
                kcordict[kcorkey][internalfiltname] = {}
                kcordict[kcorkey][internalfiltname]['filtwave'] = np.array(filtertrans['wavelength (A)'])
                kcordict[kcorkey][internalfiltname]['fullname'] = filt #.split('/')[0].replace(' ','')
                kcordict[kcorkey][internalfiltname]['filttrans'] = np.array(filtertrans[filt])
                # transmission-weighted effective wavelength
                lambdaeff = np.sum(kcordict[kcorkey][internalfiltname]['filtwave']*filtertrans[filt])/np.sum(filtertrans[filt])
                kcordict[kcorkey][internalfiltname]['lambdaeff'] = lambdaeff
                kcordict[kcorkey][internalfiltname]['magsys'] = \
                    zpoff['Primary Name'][zpoff['Filter Name'] == filt][0]
                kcordict[kcorkey][internalfiltname]['primarymag'] = \
                    zpoff['Primary Mag'][zpoff['Filter Name'] == filt][0] - zpoff['ZPoff(SNpot)'][zpoff['Filter Name'] == filt][0]
                kcordict[kcorkey][internalfiltname]['zpoff'] = \
                    zpoff['ZPoff(Primary)'][zpoff['Filter Name'] == filt][0] - zpoff['ZPoff(SNpot)'][zpoff['Filter Name'] == filt][0]
    if (options.calibrationshiftfile):
        log.info('Calibration shift file provided, applying offsets:')
        #Calibration dictionary:
        # each non-comment line: SHIFTTYPE SURVEY FILTER SHIFT
        with open(options.calibrationshiftfile) as file:
            for line in file:
                log.info(f'Applying shift: {line}')
                try:
                    line=line.split('#')[0].split()
                    if len(line)==0:
                        continue
                    shifttype,survey,filter,shift=line
                    shift=float(shift)
                    #filter=filter[-1]#filter=filter[filter.index('/')+1:]
                    if not options.calib_survey_ignore:
                        # shift applies to one specific survey's filter
                        if shifttype=='MAGSHIFT':
                            kcordict[survey][filter]['zpoff'] +=shift
                            kcordict[survey][filter]['primarymag']+=shift
                        elif shifttype=='LAMSHIFT' or shifttype=='WAVESHIFT':
                            kcordict[survey][filter]['filtwave']+=shift
                            kcordict[survey][filter]['lambdaeff']+=shift
                        else:
                            raise ValueError(f'Invalid calibration shift: {shifttype}')
                    else:
                        # shift applies to this filter in EVERY survey
                        has_filter = False
                        for survey_forfilt in kcordict.keys():
                            if filter in kcordict[survey_forfilt].keys():
                                if shifttype=='MAGSHIFT':
                                    kcordict[survey_forfilt][filter]['zpoff'] +=shift
                                    kcordict[survey_forfilt][filter]['primarymag']+=shift
                                    has_filter = True
                                elif shifttype=='LAMSHIFT' or shifttype=='WAVESHIFT':
                                    kcordict[survey_forfilt][filter]['filtwave']+=shift
                                    kcordict[survey_forfilt][filter]['lambdaeff']+=shift
                                    has_filter = True
                                else:
                                    raise ValueError(f'Invalid calibration shift: {shifttype}')
                        if not has_filter:
                            raise ValueError(f'could not find filter {filter} in any kcor files')
                except Exception as e:
                    # NOTE(review): 'line' was rebound to a token list above,
                    # so line[:-1] logs a truncated list rather than the
                    # original text of the line
                    log.critical(f'Could not apply calibration offset \"{line[:-1]}\"')
                    raise e
        log.info('Calibration offsets applied')
    else:
        log.info('No calibration shift file provided, continuing')
    # default entry: flat-nu AB reference and Bessell B/V passbands
    primarywave,primarysed = np.genfromtxt('%s/flatnu.dat'%init_rootdir,unpack=True)
    kcordict['default'] = {}
    initBfilt = '%s/Bessell90_B.dat'%init_rootdir
    filtwave,filttp = np.genfromtxt(initBfilt,unpack=True)
    kcordict['default']['Bwave'] = filtwave
    kcordict['default']['Btp'] = filttp
    initVfilt = '%s/Bessell90_V.dat'%init_rootdir
    filtwave,filttp = np.genfromtxt(initVfilt,unpack=True)
    kcordict['default']['Vwave'] = filtwave
    kcordict['default']['Vtp'] = filttp
    kcordict['default']['AB']=primarysed
    kcordict['default']['primarywave']=primarywave
    return kcordict
def rdAllData(snlists,estimate_tpk,
              dospec=False,peakmjdlist=None,
              waverange=[2000,9200],binspecres=None,snparlist=None,maxsn=None):
    """Read all supernova training data from one or more SN list files.

    snlists     : comma-separated SN list files; each lists SNANA-format
                  light-curve files (ASCII or FITS)
    estimate_tpk: if True, estimate peak MJD with a Bazin fit
    dospec      : if True, also read spectra
    peakmjdlist : optional two-column (SNID, peak MJD) text file
    waverange   : NOTE(review): parameter is never referenced in this body
    binspecres  : optional rest-frame spectral binning, passed through
    snparlist   : optional ASCII table of per-SN parameters (e.g. FITPROB)
    maxsn       : optional cap on the total number of SNe, split across
                  list files in proportion to their lengths

    Returns a dict mapping SNID -> SALTtrainingSN.
    """
    datadict = {}
    # optional peak-MJD lookup table: SNID -> peak MJD
    if peakmjdlist:
        pksnid,pkmjd = np.loadtxt(peakmjdlist,unpack=True,dtype=str,usecols=[0,1])
        pkmjd = pkmjd.astype('float')
        pkmjddict={key:val for key,val in zip(pksnid,pkmjd)}
    else: pkmjd,pksnid,pkmjddict=[],[],{}
    # optional per-SN parameter table (e.g. FITPROB for quality cuts)
    if snparlist:
        snpar = at.Table.read(snparlist,format='ascii')
        snpar['SNID'] = snpar['SNID'].astype(str)
    else: snpar = None
    nsnperlist = []
    # first pass: count the SNe in each list file (also validates paths)
    for snlist in snlists.split(','):
        snlist = os.path.expandvars(snlist)
        if not os.path.exists(snlist):
            log.info('SN list file %s does not exist. Checking %s/trainingdata/%s'%(snlist,data_rootdir,snlist))
            snlist = '%s/trainingdata/%s'%(data_rootdir,snlist)
            if not os.path.exists(snlist):
                raise RuntimeError('SN list file %s does not exist'%snlist)
        snfiles = np.genfromtxt(snlist,dtype='str')
        snfiles = np.atleast_1d(snfiles)
        nsnperlist += [len(snfiles)]
    nsnperlist=np.array(nsnperlist)
    skipcount = 0
    rdstart = time()
    #If there is a maximum number of SNe to be taken in total, take an equal number from each snlist
    if maxsn is not None: maxcount = nsnperlist*maxsn/nsnperlist.sum()
    else: maxcount = [np.inf]*len(snlists.split(','))
    #Check whether to add the supernova to a dictionary of results; if not return False, otherwise do so and return True
    def processsupernovaobject(outputdict,sn,maxnum):
        # normalize filter column name (FITS-format files use BAND)
        if 'FLT' not in sn.__dict__.keys() and \
           'BAND' in sn.__dict__.keys():
            sn.FLT = sn.BAND
        sn.SNID=str(sn.SNID)
        # first occurrence of an SNID wins; later duplicates are discarded
        if sn.SNID in datadict: duplicatesurvey=datadict[sn.SNID].survey
        elif sn.SNID in outputdict: duplicatesurvey=outputdict[sn.SNID].survey
        else: duplicatesurvey=None
        if not duplicatesurvey is None:
            log.warning(f'SNID {sn.SNID} is a duplicate! Keeping version from survey {duplicatesurvey}, discarding version from survey {sn.SURVEY}')
            return False
        try:
            saltformattedsn=SALTtrainingSN(
                sn,estimate_tpk=estimate_tpk,
                pkmjddict=pkmjddict,snpar=snpar,
                binspecres=binspecres)
        except SNDataReadError as e:
            log.warning(e.args[0])
            return False
        if len(saltformattedsn.specdata) == 0:
            log.debug(f'SN {sn.SNID} has no supernova spectra')
        outputdict[saltformattedsn.snid]=saltformattedsn
        # stop reading once this list's quota is reached
        if len(outputdict) >= maxnum:
            raise BreakLoopException('Maximum number of SNe read in')
        return True
    # second pass: actually read the data from each list file
    for snlist,maxct in zip(snlists.split(','),maxcount):
        tsn = time()  # NOTE(review): per-list timer is never reported
        snlist = os.path.expandvars(snlist)
        snfiles = np.genfromtxt(snlist,dtype='str')
        snfiles = np.atleast_1d(snfiles)
        snreadinfromlist={}
        try:
            for f in snfiles:
                # relative paths are resolved against the list file's dir
                if '/' not in f:
                    f = os.path.join(os.path.dirname(snlist),f)
                #If this is a fits file, read the list of snids and read them out one at a time
                if f.lower().endswith('.fits') or f.lower().endswith('.fits.gz'):
                    if f.lower().endswith('.fits') and not os.path.exists(f) and os.path.exists('{}.gz'.format(f)):
                        f = '{}.gz'.format(f)
                    # get list of SNIDs
                    hdata = fits.getdata( f, ext=1 )
                    survey = fits.getval( f, 'SURVEY')
                    Nsn = fits.getval( f, 'NAXIS2', ext=1 )
                    snidlist = np.array([ int( hdata[isn]['SNID'] ) for isn in range(Nsn) ])
                    if os.path.exists(f.replace('_HEAD.FITS','_SPEC.FITS')):
                        specfitsfile = f.replace('_HEAD.FITS','_SPEC.FITS')
                    else: specfitsfile = None
                    for snid in snidlist:
                        sn = snana.SuperNova(
                            snid=snid,headfitsfile=f,photfitsfile=f.replace('_HEAD.FITS','_PHOT.FITS'),
                            specfitsfile=specfitsfile,readspec=dospec)
                        # attach SUBSURVEY only when it differs from the parent survey
                        if 'SUBSURVEY' in sn.__dict__.keys() and not (len(np.unique(sn.SUBSURVEY))==1 and survey.strip()==np.unique(sn.SUBSURVEY)[0].strip()) \
                           and sn.SUBSURVEY.strip() != '':
                            sn.SURVEY = f"{survey}({sn.SUBSURVEY})"
                        else:
                            sn.SURVEY = survey
                        skipcount+=not processsupernovaobject(snreadinfromlist,sn,maxct)
                else:
                    if '/' not in f:
                        f = '%s/%s'%(os.path.dirname(snlist),f)
                    sn = snana.SuperNova(f,readspec=dospec)
                    skipcount+=not processsupernovaobject(snreadinfromlist,sn,maxct)
        except BreakLoopException:
            # quota for this list reached; move on to the next list file
            pass
        datadict.update(snreadinfromlist)
    log.info(f'read in {len(datadict)} SNe, {skipcount} SNe were not read')
    log.info('reading data files took %.1f'%(time()-rdstart))
    if not len(datadict.keys()):
        raise RuntimeError('no light curve data to train on!!')
    return datadict
import abc
import os
from collections import OrderedDict as odict
from copy import copy as cp
from textwrap import dedent
from math import ceil
import itertools
import numpy as np
from scipy.interpolate import (InterpolatedUnivariateSpline as Spline1d,
RectBivariateSpline as Spline2d)
from astropy.utils.misc import isiterable
from astropy import (cosmology, units as u, constants as const)
import extinction
from sncosmo.io import (read_griddata_ascii, read_griddata_fits,
read_multivector_griddata_ascii)
from sncosmo._registry import Registry
from sncosmo.bandpasses import get_bandpass, Bandpass
from sncosmo.magsystems import get_magsystem
from sncosmo.salt2utils import BicubicInterpolator, SALT2ColorLaw
from sncosmo.utils import integration_grid
from sncosmo.constants import HC_ERG_AA, MODEL_BANDFLUX_SPACING
from sncosmo.models import Source
class SALT3Source(Source):
"""The SALT3 Type Ia supernova spectral timeseries model.
The spectral flux density of this model is given by
.. math::
F(t, \\lambda) = x_0 (M_0(t, \\lambda) + x_1 M_1(t, \\lambda))
\\times 10^{-0.4 CL(\\lambda) c}
where ``x0``, ``x1`` and ``c`` are the free parameters of the model,
``M_0``, ``M_1`` are the zeroth and first components of the model, and
``CL`` is the colorlaw, which gives the extinction in magnitudes for
``c=1``.
Parameters
----------
modeldir : str, optional
Directory path containing model component files. Default is `None`,
which means that no directory is prepended to filenames when
determining their path.
m0file, m1file, clfile : str or fileobj, optional
Filenames of various model components. Defaults are:
* m0file = 'salt3_template_0.dat' (2-d grid)
* m1file = 'salt3_template_1.dat' (2-d grid)
* clfile = 'salt3_color_correction.dat'
errscalefile, lcrv00file, lcrv11file, lcrv01file, cdfile : str or fileobj
(optional) Filenames of various model components for
model covariance in synthetic photometry. See
``bandflux_rcov`` for details. Defaults are:
* errscalefile = 'salt3_lc_dispersion_scaling.dat' (2-d grid)
* lcrv00file = 'salt3_lc_relative_variance_0.dat' (2-d grid)
* lcrv11file = 'salt3_lc_relative_variance_1.dat' (2-d grid)
* lcrv01file = 'salt3_lc_relative_covariance_01.dat' (2-d grid)
* cdfile = 'salt3_color_dispersion.dat' (1-d grid)
Notes
-----
The "2-d grid" files have the format ``<phase> <wavelength>
<value>`` on each line.
The phase and wavelength values of the various components don't
necessarily need to match. (In the most recent salt2 model data,
they do not all match.) The phase and wavelength values of the
first model component (in ``m0file``) are taken as the "native"
sampling of the model, even though these values might require
interpolation of the other model components.
"""
# These files are distributed with SALT3 model data but not currently
# used:
# v00file = 'salt3_spec_variance_0.dat' : 2dgrid
# v11file = 'salt3_spec_variance_1.dat' : 2dgrid
# v01file = 'salt3_spec_covariance_01.dat' : 2dgrid
_param_names = ['x0', 'x1', 'c']
param_names_latex = ['x_0', 'x_1', 'c']
_SCALE_FACTOR = 1e-12
    def __init__(self, modeldir=None,
                 m0file='salt3_template_0.dat',
                 m1file='salt3_template_1.dat',
                 clfile='salt3_color_correction.dat',
                 cdfile='salt3_color_dispersion.dat',
                 errscalefile='salt3_lc_dispersion_scaling.dat',
                 lcrv00file='salt3_lc_variance_0.dat',
                 lcrv11file='salt3_lc_variance_1.dat',
                 lcrv01file='salt3_lc_covariance_01.dat',
                 name=None, version=None):
        """Load the SALT3 model components from disk.

        See the class docstring for the meaning of each component file.
        """
        self.name = name
        self.version = version
        self._model = {}
        # default parameters: x0=1, x1=0, c=0
        self._parameters = np.array([1., 0., 0.])
        names_or_objs = {'M0': m0file, 'M1': m1file,
                         'LCRV00': lcrv00file, 'LCRV11': lcrv11file,
                         'LCRV01': lcrv01file, 'errscale': errscalefile,
                         'cdfile': cdfile, 'clfile': clfile}
        # Make filenames into full paths.
        if modeldir is not None:
            for k in names_or_objs:
                v = names_or_objs[k]
                if (v is not None and isinstance(v, str)):
                    names_or_objs[k] = os.path.join(modeldir, v)
        # model components are interpolated to 2nd order
        for key in ['M0', 'M1']:
            phase, wave, values = read_griddata_ascii(names_or_objs[key])
            values *= self._SCALE_FACTOR
            self._model[key] = BicubicInterpolator(phase, wave, values)
            # The "native" phases and wavelengths of the model are those
            # of the first model component.
            if key == 'M0':
                self._phase = phase
                self._wave = wave
        # model covariance is interpolated to 1st order
        # NOTE(review): BicubicInterpolator is used here too, despite the
        # "1st order" comment carried over from the SALT2 source -- confirm
        for key in ['LCRV00', 'LCRV11', 'LCRV01', 'errscale']:
            phase, wave, values = read_griddata_ascii(names_or_objs[key])
            self._model[key] = BicubicInterpolator(phase, wave, values)
        # Set the colorlaw based on the "color correction" file.
        self._set_colorlaw_from_file(names_or_objs['clfile'])
        # Set the color dispersion from "color_dispersion" file
        w, val = np.loadtxt(names_or_objs['cdfile'], unpack=True)
        self._colordisp = Spline1d(w, val, k=1)  # linear interp.
def _flux(self, phase, wave):
m0 = self._model['M0'](phase, wave)
m1 = self._model['M1'](phase, wave)
return (self._parameters[0] * (m0 + self._parameters[1] * m1) *
10. ** (-0.4 * self._colorlaw(wave) * self._parameters[2]))
    def _bandflux_rvar_single(self, band, phase):
        """Model relative variance for a single bandpass."""
        # Raise an exception if bandpass is out of model range.
        if (band.minwave() < self._wave[0] or band.maxwave() > self._wave[-1]):
            raise ValueError('bandpass {0!r:s} [{1:.6g}, .., {2:.6g}] '
                             'outside spectral range [{3:.6g}, .., {4:.6g}]'
                             .format(band.name, band.wave[0], band.wave[-1],
                                     self._wave[0], self._wave[-1]))
        x1 = self._parameters[1]
        # integrate m0 and m1 components
        wave, dwave = integration_grid(band.minwave(), band.maxwave(),
                                       MODEL_BANDFLUX_SPACING)
        trans = band(wave)
        m0 = self._model['M0'](phase, wave)
        m1 = self._model['M1'](phase, wave)
        tmp = trans * wave
        # synthetic band fluxes of the M0 and M1 components
        f0 = np.sum(m0 * tmp, axis=1) * dwave / HC_ERG_AA
        m1int = np.sum(m1 * tmp, axis=1) * dwave / HC_ERG_AA
        ftot = f0 + x1 * m1int
        # In the following, the "[:,0]" reduces from a 2-d array of shape
        # (nphase, 1) to a 1-d array.
        lcrv00 = self._model['LCRV00'](phase, band.wave_eff)[:, 0]
        lcrv11 = self._model['LCRV11'](phase, band.wave_eff)[:, 0]
        lcrv01 = self._model['LCRV01'](phase, band.wave_eff)[:, 0]
        scale = self._model['errscale'](phase, band.wave_eff)[:, 0]
        # quadratic form in x1 combining the variance components
        v = lcrv00 + 2.0 * x1 * lcrv01 + x1 * x1 * lcrv11
        # v is supposed to be variance but can go negative
        # due to interpolation. Correct negative values to some small
        # number. (at present, use prescription of snfit : set
        # negatives to 0.0001)
        v[v < 0.0] = 0.0001
        # avoid warnings due to evaluating 0. / 0. in f0 / ftot
        with np.errstate(invalid='ignore'):
            #result = v * (f0 / ftot)**2 * scale**2
            # NOTE(review): the active formula below differs from the snfit
            # prescription in the commented-out line above ('scale' and f0
            # are unused, and a hard-coded 1e12 appears, presumably undoing
            # _SCALE_FACTOR=1e-12); confirm against the SALT3 error model
            result = v/(ftot/(trans*wave*dwave).sum())/HC_ERG_AA/1e12
        # treat cases where ftot is negative the same as snfit
        result[ftot <= 0.0] = 10000.
        return result
    def bandflux_rcov(self, band, phase):
        """Return the *relative* model covariance (or "model error") on
        synthetic photometry generated from the model in the given restframe
        band(s).

        This model covariance represents the scatter of real SNe about
        the model. The covariance matrix has two components. The
        first component is diagonal (pure variance) and depends on the
        phase :math:`t` and bandpass central wavelength
        :math:`\\lambda_c` of each photometry point:

        .. math::

           (F_{0, \\mathrm{band}}(t) / F_{1, \\mathrm{band}}(t))^2
           S(t, \\lambda_c)^2
           (V_{00}(t, \\lambda_c) + 2 x_1 V_{01}(t, \\lambda_c) +
           x_1^2 V_{11}(t, \\lambda_c))

        where the 2-d functions :math:`S`, :math:`V_{00}`, :math:`V_{01}`,
        and :math:`V_{11}` are given by the files ``errscalefile``,
        ``lcrv00file``, ``lcrv01file``, and ``lcrv11file``
        respectively and :math:`F_0` and :math:`F_1` are given by

        .. math::

           F_{0, \\mathrm{band}}(t) = \\int_\\lambda M_0(t, \\lambda)
           T_\\mathrm{band}(\\lambda)
           \\frac{\\lambda}{hc} d\\lambda

        .. math::

           F_{1, \\mathrm{band}}(t) = \\int_\\lambda
           (M_0(t, \\lambda) + x_1 M_1(t, \\lambda))
           T_\\mathrm{band}(\\lambda)
           \\frac{\\lambda}{hc} d\\lambda

        As this first component can sometimes be negative due to
        interpolation, there is a floor applied wherein values less than zero
        are set to ``0.01**2``. This is to match the behavior of the
        original SALT2 code, snfit.

        The second component is block diagonal. It has
        constant covariance between all photometry points within a
        bandpass (regardless of phase), and no covariance between
        photometry points in different bandpasses:

        .. math::

           CD(\\lambda_c)^2

        where the 1-d function :math:`CD` is given by the file ``cdfile``.

        Adding these two components gives the *relative* covariance on model
        photometry.

        Parameters
        ----------
        band : `~numpy.ndarray` of `~sncosmo.Bandpass`
            Bandpasses of observations.
        phase : `~numpy.ndarray` (float)
            Phases of observations.

        Returns
        -------
        rcov : `~numpy.ndarray`
            Model relative covariance for given bandpasses and phases.
        """
        # construct covariance array with relative variance on diagonal,
        # computed per distinct bandpass
        diagonal = np.zeros(phase.shape, dtype=np.float64)
        for b in set(band):
            mask = band == b
            diagonal[mask] = self._bandflux_rvar_single(b, phase[mask])
        result = np.diagflat(diagonal)
        # add kcorr errors: constant covariance between all points that share
        # a bandpass (the block-diagonal component described above)
        for b in set(band):
            mask1d = band == b
            mask2d = mask1d * mask1d[:, None]  # outer product: mask for result array
            kcorrerr = self._colordisp(b.wave_eff)
            result[mask2d] += kcorrerr**2
        return result
def _set_colorlaw_from_file(self, name_or_obj):
"""Read color law file and set the internal colorlaw function."""
# Read file
if isinstance(name_or_obj, str):
f = open(name_or_obj, 'r')
else:
f = name_or_obj
words = f.read().split()
f.close()
# Get colorlaw coeffecients.
ncoeffs = int(words[0])
colorlaw_coeffs = [float(word) for word in words[1: 1 + ncoeffs]]
# If there are more than 1+ncoeffs words in the file, we expect them to
# be of the form `keyword value`.
version = 0
colorlaw_range = [3000., 7000.]
for i in range(1+ncoeffs, len(words), 2):
if words[i] == 'Salt2ExtinctionLaw.version':
version = int(words[i+1])
elif words[i] == 'Salt2ExtinctionLaw.min_lambda':
colorlaw_range[0] = float(words[i+1])
elif words[i] == 'Salt2ExtinctionLaw.max_lambda':
colorlaw_range[1] = float(words[i+1])
else:
raise RuntimeError("Unexpected keyword: {}".format(words[i]))
# Set extinction function to use.
if version == 0:
raise Exception("Salt2ExtinctionLaw.version 0 not supported.")
elif version == 1:
self._colorlaw = SALT2ColorLaw(colorlaw_range, colorlaw_coeffs)
else:
raise Exception('unrecognized Salt2ExtinctionLaw.version: ' +
version)
def colorlaw(self, wave=None):
"""Return the value of the CL function for the given wavelengths.
Parameters
----------
wave : float or list_like
Returns
-------
colorlaw : float or `~numpy.ndarray`
Values of colorlaw function, which can be interpreted as extinction
in magnitudes.
"""
if wave is None:
wave = self._wave
else:
wave = np.asarray(wave)
if wave.ndim == 0:
return self._colorlaw(np.ravel(wave))[0]
else:
return self._colorlaw(wave) | /saltshaker-sn-1.2.tar.gz/saltshaker-sn-1.2/saltshaker/util/salt3_sncosmo.py | 0.808029 | 0.324516 | salt3_sncosmo.py | pypi |
from saltshaker.training import datamodels
import jax
from jax import numpy as jnp
from jax.experimental import sparse
import numpy as np
from scipy import optimize, stats
import warnings
def optimizepaddingsizes(numbatches,datasizes):
    """Given a set of the sizes of batches of data, and a number of batches to
    divide them into, each of which is zero-padded, determine the
    zero-paddings that will minimize the additional space required to store
    the results.

    Parameters
    ----------
    numbatches : int
        Number of padded bins to divide the data into.
    datasizes : array_like of int
        Sizes of the individual pieces of data.

    Returns
    -------
    (padsizes, efficiency) : tuple
        `padsizes` is an integer array of pad lengths with empty bins
        removed; `efficiency` is the ratio of useful data to allocated
        space under the optimized binning.
    """
    # Transform a set of n-1 unconstrained parameters into n+1 bin edges,
    # starting at 0 and monotonically increasing to `largest`.
    def parstobins(pars,largest):
        # Probably a better transform to use here!
        pars=np.abs(pars)
        pars=np.concatenate([[0],pars,[1.]])
        bins=np.cumsum(pars/pars.sum())
        bins*=largest
        # Nudge the last edge up so the largest datum falls inside the bin.
        bins[-1]+=.1
        return bins
    # Define the loss function to be used here: defined here as just the
    # space required to store the result (count per bin times bin capacity).
    def loss(pars):
        bins=parstobins(pars,max(datasizes))
        spacerequired=stats.binned_statistic(datasizes, datasizes,statistic='count', bins=bins).statistic* np.floor(bins[1:])
        return spacerequired.sum()
    ndim=numbatches-1
    #Integer programming doesn't seem to have a particularly easy implementation in python
    #I use Nelder-Mead method since it doesn't use gradients, and it gets reasonable results
    #Starting vertices are chosen based on a fixed rng seed to eliminate reproducibility error
    vertices=np.random.default_rng(seed=13241245642435).random(size=(ndim+1,ndim))
    result=optimize.minimize(loss,[0]*ndim, method='Nelder-Mead', options= {
        'initial_simplex':vertices})
    pars=result.x
    bins=parstobins(pars,max(datasizes))
    # Pad size of each bin is the largest datum that falls into it.
    padsizes= stats.binned_statistic(datasizes, datasizes,statistic= lambda x: x.max() if x.size>0 else 0, bins=bins).statistic
    padsizes[np.isnan(padsizes)]=0
    padsizes=padsizes.astype(int)
    finalspacecost=(stats.binned_statistic(datasizes, datasizes,statistic='count', bins=bins).statistic* padsizes).sum()
    # Drop empty bins before returning.
    padsizes=padsizes[np.nonzero(padsizes)]
    return padsizes,sum(datasizes)/finalspacecost
def batchdatabysize(data):
""" Given a set of data, divide them into batches each of a fixed size, for easy use with jax's batching methods. Quantities that are marked as 'mapped' by their objects will be turned into arrays, otherwise the values are tested for equality and given as unmapped objects """
batcheddata={ }
for x in data:
#determine length of each piece of data
key=len(x)
#assign data to an appropriate entry in dictionary based on length
if key in batcheddata:
batcheddata[key] += [x]
else:
batcheddata[key] =[x]
#Given a batch of data, unpacks it and stacks it along the first axis for use with jax's vmap method
def repackforvmap(data):
__ismapped__=data[0].__ismapped__
#Given n data with m attributes, this yields an n x m list of lists
unpacked=[x.unpack() for x in data]
#Want to convert that into an m-element list of n-element arrays or single values
for j,varname in enumerate( data[0].__slots__):
vals=([(unpacked[i][j]) for i in range(len(unpacked))])
#If it's a sparse array, concatenate along new "batched" axis for use with vmap
if isinstance(vals[0],sparse.BCOO) :
yield sparse.bcoo_concatenate([x.reshape((1,*x.shape)).update_layout(n_batch=1) for x in vals] ,dimension=0)
else:
if not (varname in __ismapped__ ):
#If an attribute is not to be mapped over, the single value is set, and it is verified that it is the same for all elements
assert(np.all(vals[0]==vals), "Unmapped quantity different between different objects")
yield vals[0]
else:
yield np.stack(vals,axis=0)
#Returns a list of batches of data suitable for use with the batchedmodelfunctions function
return [list(repackforvmap(x)) for x in batcheddata.values()]
def walkargumenttree(x, targetsize, ncalls=0):
    """Walk an argument tree marking which leaves should be vmapped.

    Returns 0 for a leaf whose first axis matches ``targetsize`` (meaning it
    is to be mapped over), None for other leaves, and a matching container
    structure for dicts/sequences -- mirroring jax's ``in_axes`` convention.
    """
    if isinstance(x, dict):
        return {key: walkargumenttree(val, targetsize, ncalls + 1)
                for key, val in x.items()}
    if hasattr(x, 'shape'):
        # Array-like leaf: mapped iff its leading axis has the target size.
        if len(x.shape) > 0 and x.shape[0] == targetsize:
            return 0
        return None
    if hasattr(x, '__len__'):
        # Plain sequences of the target size are mapped only below top level.
        if len(x) == targetsize and ncalls > 0:
            return 0
        return type(x)(walkargumenttree(item, targetsize, ncalls + 1)
                       for item in x)
    return None
def batchedmodelfunctions(function,batcheddata, dtype,flatten=False,sum=False):
    """Constructor function to map a function that takes a modeledtrainingdata
    object as first arg and a SALTparameters object as second arg over
    batched, zero-padded data.

    Parameters
    ----------
    function : callable
        Called as ``function(datum, pars, *args, **kwargs)`` inside
        ``jax.vmap``.
    batcheddata : list
        Batches as produced by `batchdatabysize`.
    dtype : type or {'light-curves', 'spectra'}
        The modeledtrainingdata subclass contained in the batches.
    flatten : bool
        If True, flatten each batch's result and concatenate across batches.
    sum : bool
        If True, return the scalar sum over all batches instead of a list.

    Returns
    -------
    callable
        ``vectorized(pars, *batchedargs, **batchedkwargs)`` evaluating
        `function` over all batches.
    """
    if issubclass(dtype,datamodels.modeledtrainingdata) :
        pass
    elif dtype in ['light-curves','spectra']:
        if dtype=='light-curves':
            dtype=datamodels.modeledtraininglightcurve
        else:
            dtype=datamodels.modeledtrainingspectrum
    else:
        raise ValueError(f'Invalid datatype {dtype}')
    # Per-batch dict of the index attributes needed to slice SALTparameters.
    batchedindexed= [dict([(x,y) for x,y in zip(dtype.__slots__,batch) if x in
                dtype.__indexattributes__ ]) for batch in batcheddata]
    def vectorized(pars,*batchedargs,**batchedkwargs):
        if sum: result=0
        else: result=[]
        # Broadcast extra positional args across batches: sequences are
        # zipped batch-wise, scalars are repeated for every batch.
        if len(batchedargs)==0:
            batchedargs=[[]]*len(batcheddata)
        else:
            batchedargs= list(zip(*[x if hasattr(x,'__len__') else [x]*len(batcheddata) for x in batchedargs]))
        # Same broadcasting rule for keyword arguments.
        if (batchedkwargs)=={}:
            batchedkwargs=[{}]*len(batcheddata)
        else:
            batchedkwargs= [{y:x[i] if hasattr(x,'__len__') else x for y,x in batchedkwargs.items()}
                 for i in range(len(batcheddata))]
        for batch,indexdict,args,kwargs in zip(batcheddata,batchedindexed,batchedargs,batchedkwargs):
            def funpacked (lc,pars,kwargs,*args):
                # Rebuild the data object and the parameter pytree inside the
                # vmapped function, then evaluate the wrapped function.
                lc= dtype.repack(lc)
                pars=datamodels.SALTparameters.tree_unflatten((),pars)
                return function(lc,pars,*args,**kwargs)
            newpars=datamodels.SALTparameters(indexdict, pars)
            parsvmapped=newpars.tree_flatten()[0]
            newargs=kwargs,*args
            #Determine which axes of the arguments correspond to the lightcurve data
            targetsize=indexdict['ix0'].size
            #The batched data has prenoted which arguments are to be mapped over; otherwise need to attempt to determine it programatically
            inaxes= [(0 if (x in dtype.__ismapped__) else None) for x in dtype.__slots__],newpars.mappingaxes,*walkargumenttree(newargs,targetsize)
            mapped=jax.vmap( funpacked,in_axes=
                    inaxes
                    )(
                batch,list(parsvmapped),*newargs
            )
            if flatten: mapped=mapped.flatten()
            if sum:
                result+=mapped.sum()
            else:
                result+=[mapped ]
        if flatten:
            return jnp.concatenate(result)
        else:
            return result
    return vectorized
# scopes: https://developers.google.com/identity/protocols/oauth2/scopes
# sending: https://developers.google.com/gmail/api/guides/sending
from __future__ import print_function
import base64
import json
import os.path
from email.message import EmailMessage
from enum import Enum
from pathlib import Path
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from src.logger import get_logger
logger = get_logger(__name__, "./gmailer.log")
# If modifying these scopes, delete the file token.json.
SCOPES = [
"https://www.googleapis.com/auth/gmail.modify",
"https://www.googleapis.com/auth/gmail.labels",
"https://www.googleapis.com/auth/gmail.settings.basic",
# "https://www.googleapis.com/auth/gmail.metadata",
]
class Labels(Enum):
    """Gmail system (built-in) label identifiers.

    These are the reserved label ids defined by the Gmail API; user-created
    labels get generated ids instead.
    """

    CHAT = "CHAT"
    SENT = "SENT"
    INBOX = "INBOX"
    IMPORTANT = "IMPORTANT"
    TRASH = "TRASH"
    DRAFT = "DRAFT"
    SPAM = "SPAM"
    STARRED = "STARRED"
    UNREAD = "UNREAD"
class Mailer:
    """Thin wrapper around the Gmail API for the authenticated user ("me").

    The underlying Google API service client is built lazily on first use
    (see :attr:`service`) unless one is injected via the constructor.
    """

    def __init__(self, service=None):
        """Store an optional pre-built Gmail API service client.

        Args:
            service: an already-authenticated service object; when ``None``
                one is bootstrapped from ``./creds`` on first access.
        """
        self._service = service

    @property
    def service_user(self):
        """returns service.users"""
        return self.service.users()

    @property
    def service_settings(self):
        """returns service.users.settings"""
        return self.service_user.settings()

    @property
    def service(self):
        """Return a bootstrapped service, creating and caching it on first use.

        Note: properties cannot receive arguments, so the former ``**kwargs``
        parameter (``token_path`` / ``credentials_path``) was unreachable dead
        code; the paths are now plain constants with the same values.
        """
        if self._service is None:
            token_path = "./creds/token.json"
            credentials_path = "./creds/credentials.json"
            creds = None
            # The file token.json stores the user's access and refresh tokens,
            # and is created automatically when the authorization flow
            # completes for the first time.
            if os.path.exists(token_path):
                creds = Credentials.from_authorized_user_file(token_path, SCOPES)
            # If there are no (valid) credentials available, let the user log in.
            if not creds or not creds.valid:
                if creds and creds.expired and creds.refresh_token:
                    creds.refresh(Request())
                else:
                    if not Path(credentials_path).exists():
                        raise FileNotFoundError(
                            "credentials.json is missing... https://console.cloud.google.com"
                        )
                    flow = InstalledAppFlow.from_client_secrets_file(
                        credentials_path, SCOPES
                    )
                    creds = flow.run_local_server(port=0)
                # Save the credentials for the next run
                with open(token_path, "w", encoding="utf-8") as token:
                    token.write(creds.to_json())
            self._service = build("gmail", "v1", credentials=creds)
        return self._service

    def modify_labels(self, messages, remove_labels=None, add_labels=None):
        """Batch-add/remove labels on a list of messages.

        Args:
            messages: message dicts, each containing an ``id`` key.
            remove_labels: label ids to remove (optional).
            add_labels: label ids to add (optional).

        Returns:
            ``[]`` when there is nothing to do or an HTTP error occurred;
            ``None`` on success (the Gmail batchModify call has no body).
        """
        if len(messages) == 0:
            return []
        try:
            body = {
                "ids": [message["id"] for message in messages],
                "removeLabelIds": remove_labels if remove_labels else [],
                "addLabelIds": add_labels if add_labels else [],
            }
            self.service_user.messages().batchModify(
                userId="me",
                body=body,
            ).execute()
        except HttpError as error:
            # Lazy %-style args avoid formatting when the level is disabled.
            logger.error("An error occurred: %s", error)
            return []

    @property
    def labels(self):
        """returns list of labels objects"""
        try:
            return (
                self.service_user.labels()
                .list(userId="me")
                .execute()
                .get("labels", [])
            )
        except HttpError as error:
            # Use the module logger (not print) for consistent error reporting.
            logger.error("An error occurred: %s", error)
            return []

    @property
    def filters(self):
        """returns a list of filter objects

        Note: the Gmail API response key for this endpoint is "filter"
        (singular), not "filters".
        """
        return (
            self.service_settings
            .filters()
            .list(userId="me")
            .execute()
            .get("filter", [])
        )

    def search(self, search_query):
        """Search all messages against a Google-style search query.

        Args:
            search_query: iterable of query terms, joined with spaces.

        Returns:
            list of matching message dicts (possibly empty).
        """
        return (
            self.service_user.messages()
            .list(
                userId="me",
                q=" ".join(search_query),
            )
            .execute()
            .get("messages", [])
        )

    @property
    def threads(self):
        """returns a generator of thread objects (as JSON strings), or [] on error"""
        try:
            threads = (
                self.service_user.threads()
                .list(userId="me")
                .execute()
                .get("threads", [])
            )
            # Lazily fetch the full thread for each id in the listing.
            return (
                json.dumps(
                    self.service_user.threads()
                    .get(userId="me", id=thread["id"])
                    .execute()
                )
                for thread in threads
            )
        except HttpError as error:
            logger.error("An error occurred: %s", error)
            return []

    @property
    def messages(self):
        """returns a generator of full message objects, or [] on error"""
        try:
            messages = (
                self.service_user.messages()
                .list(userId="me")
                .execute()
                .get("messages", [])
            )
            # Lazily fetch the full message for each id in the listing.
            return (
                self.service_user.messages()
                .get(userId="me", id=message["id"])
                .execute()
                for message in messages
            )
        except HttpError as error:
            logger.error("An error occurred: %s", error)
            return []

    def create_draft(
        self,
        to="hllbz86@gmail.com",
        sender="hllbz86@gmail.com",
        subject="Automated draft",
        content="This is automated draft mail",
    ):
        """Create and insert a draft email.

        The defaults reproduce the previously hard-coded draft; recipient,
        sender, subject and body are now parameterized (backward compatible).

        Returns:
            Draft object (including draft id and message metadata), or
            ``None`` if an HTTP error occurred.
        """
        try:
            message = EmailMessage()
            message.set_content(content)
            message["To"] = to
            message["From"] = sender
            message["Subject"] = subject
            # The Gmail API expects the raw RFC 2822 message, base64url-encoded.
            encoded_message = base64.urlsafe_b64encode(message.as_bytes()).decode()
            create_message = {"message": {"raw": encoded_message}}
            # pylint: disable=E1101
            draft = (
                self.service_user.drafts()
                .create(userId="me", body=create_message)
                .execute()
            )
            logger.info("Draft id: %s\nDraft message: %s",
                        draft["id"], draft["message"])
        except HttpError as error:
            logger.error("An error occurred: %s", error)
            draft = None
        return draft

    def create_filter(self, name):
        """Create a filter that labels mail from a fixed sample sender and
        archives it (removes INBOX).

        Args:
            name (str): the label id to apply.

        Returns:
            str: the id of the created filter.
        """
        label_name = name
        filter_content = {
            "criteria": {"from": "gsuder1@workspacesamples.dev"},
            "action": {"addLabelIds": [label_name], "removeLabelIds": ["INBOX"]},
        }
        # pylint: disable=E1101
        result = (
            self.service_settings
            .filters()
            .create(userId="me", body=filter_content)
            .execute()
        )
        return result.get("id")
import asyncio
import binascii
import logging
import ssl
# noinspection PyUnresolvedReferences
from typing import Coroutine # noqa
from typing import (
Any,
Awaitable,
Callable,
List,
Mapping,
Optional,
TypeVar,
cast,
)
import libnacl
import libnacl.public
from .typing import (
LogbookLevel,
Logger,
LoggingLevel,
NoReturn,
ServerSecretPermanentKey,
)
__all__ = (
'logger_group',
'enable_logging',
'disable_logging',
'get_logger',
'consteq',
'create_ssl_context',
'load_permanent_key',
'cancel_awaitable',
'log_exception',
)
# Do not export!
T = TypeVar('T')
# noinspection PyPropertyDefinition
def _logging_error(*_: Any, **__: Any) -> NoReturn:
    """Raise :exc:`ImportError`; used as a stub wherever :mod:`logbook` is
    required but not installed."""
    raise ImportError('Please install saltyrtc.server[logging] for logging support')
# Handlers bridging between :mod:`logging` and :mod:`logbook`; both stay
# ``None`` when logbook is not installed.
_logger_redirect_handler = None  # type: Optional[logbook.compat.RedirectLoggingHandler]
_logger_convert_level_handler = None  # type: Optional[logbook.compat.LoggingHandler]
try:
    # noinspection PyUnresolvedReferences
    import logbook
except ImportError:
    class _Logger:
        """
        Dummy logger in case :mod:`logbook` is not present.
        """
        def __init__(self, name: str, level: Optional[int] = 0) -> None:
            self.name = name
            self.level = level
        # All logging methods are no-ops on the dummy logger.
        debug = info = warn = warning = notice = error = exception = \
            critical = log = lambda *a, **kw: None

    # noinspection PyPropertyDefinition
    class _LoggerGroup:
        """
        Dummy logger group in case :mod:`logbook` is not present.
        """
        def __init__(
                self,
                loggers: Optional[List[Any]] = None,
                level: Optional[int] = 0,
                processor: Optional[Any] = None,
        ) -> None:
            self.loggers = loggers
            self.level = level
            self.processor = processor
        # Any attempt to use the group without logbook raises ImportError
        # via the `_logging_error` stub.
        disabled = property(lambda *_, **__: True, _logging_error)
        add_logger = remove_logger = process_record = _logging_error
    logger_group = _LoggerGroup()
else:
    # logbook is available: use the real classes and keep the group
    # disabled until `enable_logging` is called.
    _Logger = logbook.Logger  # type: ignore
    _logger_redirect_handler = logbook.compat.RedirectLoggingHandler()
    _logger_convert_level_handler = logbook.compat.LoggingHandler()
    logger_group = logbook.LoggerGroup()
    logger_group.disabled = True
def _convert_level(logbook_level: LogbookLevel) -> LoggingLevel:
    """
    Convert a :mod:`logbook` level to a :mod:`logging` level.

    Arguments:
        - `logbook_level`: A :mod:`logbook` level.

    Raises :class:`ImportError` in case :mod:`logbook` is not
    installed.
    """
    if _logger_convert_level_handler is None:
        _logging_error()
    # At this point, logbook is guaranteed to be available.
    return LoggingLevel(_logger_convert_level_handler.convert_level(logbook_level))
def _redirect_logging_loggers(
        wrapped_loggers: Mapping[str, LogbookLevel],
        remove: Optional[bool] = False,
) -> None:
    """
    Enable logging and redirect :mod:`logging` loggers to
    :mod:`logbook`.

    Arguments:
        - `wrapped_loggers`: A dictionary containing :mod:`logging`
          logger names as key and the targeted :mod:`logbook` logging
          level as value. These loggers will be redirected to logbook.
        - `remove`: Flag to remove the redirect handler from each
          logger instead of adding it.

    Raises :class:`ImportError` in case :mod:`logbook` is not
    installed.
    """
    if _logger_redirect_handler is None:
        _logging_error()
    # At this point, logbook is either defined or an error has been returned
    for name, level in wrapped_loggers.items():
        # Lookup logger and translate level
        logger = logging.getLogger(name)
        logger.setLevel(_convert_level(level))
        # Add or remove redirect handler.
        if remove:
            logger.removeHandler(_logger_redirect_handler)
        else:
            logger.addHandler(_logger_redirect_handler)
def enable_logging(
        level: Optional[LogbookLevel] = None,
        redirect_loggers: Optional[Mapping[str, LogbookLevel]] = None,
) -> None:
    """
    Enable logging for the *saltyrtc* logger group.

    Arguments:
        - `level`: A :mod:`logbook` logging level. Defaults to
          ``WARNING``.
        - `redirect_loggers`: A dictionary containing :mod:`logging`
          logger names as key and the targeted :mod:`logbook` logging
          level as value. Each logger will be looked up and redirected
          to :mod:`logbook`. Defaults to an empty dictionary.

    Raises :class:`ImportError` in case :mod:`logbook` is not
    installed.
    """
    if _logger_convert_level_handler is None:
        _logging_error()
    # At this point, logbook is either defined or an error has been returned
    if level is None:
        level = logbook.WARNING
    # Un-disable the group and apply the requested level.
    logger_group.disabled = False
    logger_group.level = level
    if redirect_loggers is not None:
        _redirect_logging_loggers(redirect_loggers, remove=False)
def disable_logging(
        redirect_loggers: Optional[Mapping[str, LogbookLevel]] = None,
) -> None:
    """
    Disable logging for the *saltyrtc* logger group.

    Arguments:
        - `redirect_loggers`: A dictionary containing :mod:`logging`
          logger names as key and the targeted :mod:`logbook` logging
          level as value. Each logger will be looked up and removed
          from the redirect handler. Defaults to an empty dictionary.

    Raises :class:`ImportError` in case :mod:`logbook` is not
    installed and `redirect_loggers` is supplied.
    """
    logger_group.disabled = True
    if redirect_loggers is None:
        return
    # Detach the redirect handler from every previously wrapped logger.
    _redirect_logging_loggers(redirect_loggers, remove=True)
def get_logger(
        name: Optional[str] = None,
        level: Optional[LogbookLevel] = None,
) -> 'logbook.Logger':
    """
    Return a :class:`logbook.Logger`.

    Arguments:
        - `name`: The name of a specific sub-logger. Defaults to
          `saltyrtc`. If supplied, will be prefixed with `saltyrtc.`.
        - `level`: A :mod:`logbook` logging level. Defaults to
          :attr:`logbook.NOTSET`.

    Raises :class:`ImportError` in case :mod:`logbook` is not
    installed.
    """
    if _logger_convert_level_handler is None:
        _logging_error()
    # At this point, logbook is either defined or an error has been returned
    if level is None:
        level = logbook.NOTSET
    base_name = 'saltyrtc'
    name = base_name if name is None else '.'.join((base_name, name))
    # Create new logger and add to group
    logger = logbook.Logger(name=name, level=level)
    logger_group.add_logger(logger)
    return logger
def consteq(left: bytes, right: bytes) -> bool:
    """
    Compares two byte instances with one another. If `left` and `right`
    have different lengths, return `False` immediately. Otherwise they
    will be compared in constant time.

    Return `True` in case `left` and `right` are equal. Otherwise
    `False`.

    Raises :exc:`TypeError` in case `left` and `right` are not both of
    the type :class:`bytes`.
    """
    # Delegates to libnacl's constant-time byte comparison.
    return libnacl.bytes_eq(left, right)
def create_ssl_context(
        certfile: str,
        keyfile: Optional[str] = None,
        dh_params_file: Optional[str] = None,
) -> ssl.SSLContext:
    """
    Build and return a server-side :class:`ssl.SSLContext`.

    Settings are chosen by :func:`ssl.create_default_context`, which
    usually represents a higher security level than instantiating
    :class:`ssl.SSLContext` directly.

    Arguments:
        - `certfile`: Path to a PEM file containing the server's SSL
          certificate.
        - `keyfile`: Path to a file containing the private key. Read
          from `certfile` when omitted.
        - `dh_params_file`: Path to a PEM file containing
          Diffie-Hellman parameters for DH(E) and ECDH(E). Optional
          but highly recommended.
    """
    context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    context.load_cert_chain(certfile=certfile, keyfile=keyfile)
    # DH parameters are only loaded when a file is explicitly provided.
    if dh_params_file is not None:
        context.load_dh_params(dh_params_file)
    return context
def load_permanent_key(key: str) -> ServerSecretPermanentKey:
    """
    Decode a hex-encoded NaCl private permanent key or read it from a
    file.

    Arguments:
        - `key`: A hex-encoded 32 bytes private permanent key or the
          name of a file which contains the key.

    Raises :exc:`ValueError` in case the key could not be found or
    is not a valid hex-encoded NaCl private key.

    Return a :class:`libnacl.public.SecretKey` instance.
    """
    # If `key` names a readable file, take the key material from its first
    # line; otherwise treat `key` itself as the hex-encoded key.
    try:
        with open(key) as key_file:
            hex_key = key_file.readline().strip()
    except IOError:
        hex_key = key
    # Decode from hex with a clearer error message on bad input.
    try:
        raw_key = binascii.unhexlify(hex_key)
    except binascii.Error as exc:
        raise ValueError('Could not decode key') from exc
    # Convert to a private key (raises ValueError on its own).
    return ServerSecretPermanentKey(libnacl.public.SecretKey(sk=raw_key))
def cancel_awaitable(
        awaitable: Awaitable[Any],
        log: Logger,
        done_cb: Optional[Callable[[Awaitable[Any]], Any]] = None
) -> None:
    """
    Cancel a coroutine or a :class:`asyncio.Task`.

    Arguments:
        - `awaitable`: The coroutine or :class:`asyncio.Task` to be
          cancelled.
        - `log`: Logger used for debug messages about the outcome.
        - `done_cb`: An optional callback to be called once the task
          has been cancelled. Will be called immediately if
          `awaitable` is a coroutine.
    """
    if asyncio.iscoroutine(awaitable):
        coroutine = cast(Coroutine[Any, Any, None], awaitable)
        log.debug('Closing coroutine {}', coroutine)
        coroutine.close()
        if done_cb is not None:
            done_cb(coroutine)
    else:
        task = cast('asyncio.Task[None]', awaitable)
        # A cancelled task can still contain an exception, so we try to
        # fetch that first to avoid having the event loop's exception
        # handler yelling at us.
        try:
            exc = task.exception()
        except asyncio.CancelledError:
            log.debug('Already cancelled task {}', task)
        except asyncio.InvalidStateError:
            # The task has not completed yet, so actually cancel it.
            log.debug('Cancelling task {}', task)
            task.cancel()
        else:
            # BUG FIX: the two branches below were swapped. The original
            # called `task.result()` precisely when an exception was set,
            # which re-raises that exception instead of logging it.
            if exc is None:
                log.debug('Ignoring completion of task {} with {}',
                          task, task.result())
            else:
                log.debug('Ignoring exception of task {}: {}', task, repr(exc))
        if done_cb is not None:
            # noinspection PyTypeChecker
            task.add_done_callback(done_cb)
async def log_exception(
        awaitable: Awaitable[T],
        log_handler: Callable[[Exception], None],
) -> T:
    """
    Await an awaitable and forward the stack trace of any uncaught
    exception to a log handler before re-raising it.

    .. note:: :exc:`asyncio.CancelledError` is re-raised without being
              forwarded.

    Arguments:
        - `awaitable`: A coroutine, task or future.
        - `log_handler`: A callable logging the exception.
    """
    try:
        result = await awaitable
    except asyncio.CancelledError:
        # Cancellation is control flow, not an error worth logging.
        raise
    except Exception as exc:
        log_handler(exc)
        raise
    else:
        return result
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
List,
MutableMapping,
NewType,
Optional,
Tuple,
TypeVar,
Union,
)
import libnacl.public
if TYPE_CHECKING:
# noinspection PyUnresolvedReferences
import logbook # noqa
# noinspection PyUnresolvedReferences
from .events import Event # noqa
__all__ = (
'NoReturn',
'ListOrTuple',
'PathHex',
'ServerPublicPermanentKey',
'InitiatorPublicPermanentKey',
'ResponderPublicSessionKey',
'ClientPublicKey',
'SequenceNumber',
'OutgoingSequenceNumber',
'IncomingSequenceNumber',
'ServerCookie',
'ClientCookie',
'ChosenSubProtocol',
'PingInterval',
'SignedKeys',
'MessageId',
'DisconnectedData',
'EventData',
'EventCallback',
'Nonce',
'Packet',
'EncryptedPayload',
'RawPayload',
'Payload',
'ServerSecretPermanentKey',
'ServerSecretSessionKey',
'MessageBox',
'SignBox',
'Job',
'Result',
'Logger',
'LogbookLevel',
'LoggingLevel',
)
# Unconstrained type variables
# Important: Do not export!
T = TypeVar('T') # Any type
# NoReturn
try:
from typing import NoReturn
except ImportError:
NoReturn = None
# Helpers
# -------
# List or Tuple
ListOrTuple = Union[List[T], Tuple[T]]
# Common
# ------
# The path in hexadecimal representation
PathHex = NewType('PathHex', str)
# One of the server's public keys (chosen by the client)
ServerPublicPermanentKey = NewType('ServerPublicPermanentKey', bytes)
# The initiator's public permanent key
InitiatorPublicPermanentKey = NewType('InitiatorPublicPermanentKey', bytes)
# The responder's public session key
ResponderPublicSessionKey = NewType('ResponderPublicSessionKey', bytes)
# The client's public key is either
# a) the initiator's public permanent key (during the handshake), or
# b) the client's public session key (updated after the handshake).
ClientPublicKey = Union[InitiatorPublicPermanentKey, ResponderPublicSessionKey]
# An incoming or outgoing sequence number
SequenceNumber = NewType('SequenceNumber', int)
# The server's outgoing sequence number
OutgoingSequenceNumber = NewType('OutgoingSequenceNumber', SequenceNumber)
# The client's incoming sequence number
IncomingSequenceNumber = NewType('IncomingSequenceNumber', SequenceNumber)
# The server's chosen cookie
ServerCookie = NewType('ServerCookie', bytes)
# The client's chosen cookie
ClientCookie = NewType('ClientCookie', bytes)
# One of the sub-protocols the client offered during the websocket
# sub-protocol negotiation
ChosenSubProtocol = NewType('ChosenSubProtocol', str)
# The negotiated ping interval
PingInterval = NewType('PingInterval', int)
# Signed keys to be provided to the user
SignedKeys = NewType('SignedKeys', bytes)
# The message id of a message that is in the progress of being relayed
MessageId = NewType('MessageId', bytes)
# Events
# ------
# Data provided to the registered callbacks
DisconnectedData = NewType('DisconnectedData', int)
EventData = Union[
None, # `initiator-connected` / `responder-connected`
DisconnectedData,
]
# The event callback as provided by the user
EventCallback = Callable[['Event', Optional[PathHex], EventData], Awaitable[None]]
# Message
# -------
# Nonce
Nonce = NewType('Nonce', bytes)
# A packet including nonce and payload as bytes as received/sent
Packet = NewType('Packet', bytes)
# An encrypted payload (can be decrypted to a raw payload)
EncryptedPayload = NewType('EncryptedPayload', bytes)
# A raw payload (can be unpacked to a payload)
RawPayload = NewType('RawPayload', bytes)
# The payload as expected by the protocol (always dict-like in our implementation)
Payload = MutableMapping[str, Any]
# Protocol
# --------
# One of the server's secret permanent key pairs
ServerSecretPermanentKey = NewType('ServerSecretPermanentKey', libnacl.public.SecretKey)
# The server's secret session key pair
ServerSecretSessionKey = NewType('ServerSecretSessionKey', libnacl.public.SecretKey)
# Box for encrypting/decrypting messages
MessageBox = NewType('MessageBox', libnacl.public.Box)
# Box for "signing" the keys in the 'server-auth' message
SignBox = NewType('SignBox', libnacl.public.Box)
# A job of the job queue
Job = Awaitable[None]
# Task
# ----
# A consolidated result
Result = NewType('Result', BaseException)
# Util
# ----
# :mod:`logbook` Logger abstraction
Logger = Any
# A :mod:`logbook` log level
LogbookLevel = NewType('LogbookLevel', int)
# A :mod:`logging` log level
LoggingLevel = NewType('LoggingLevel', int) | /saltyrtc.server-5.0.1.tar.gz/saltyrtc.server-5.0.1/saltyrtc/server/typing.py | 0.861974 | 0.156685 | typing.py | pypi |
from salure_helpers.salureconnect import SalureConnect
from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient, generate_account_sas, ResourceTypes, AccountSasPermissions
from typing import Union, List
from datetime import datetime, timedelta
class BlobStorage(SalureConnect):
    """Wrapper around the Azure Blob Storage SDK, authenticated via
    credentials stored in SalureConnect."""

    def __init__(self, label: Union[str, List]):
        super().__init__()
        # Build an authenticated service client straight away.
        self.blob_service_client = self.__get_authentication(label=label)

    def __get_authentication(self, label):
        """Fetch storage credentials from SalureConnect and return a
        BlobServiceClient authenticated with a one-hour SAS token."""
        credentials = self.get_system_credential(system='azure-blob-storage', label=label)
        account_name = credentials['storage_account_name']
        account_key = credentials['storage_account_key']
        sas_token = generate_account_sas(
            account_name=account_name,
            account_key=account_key,
            resource_types=ResourceTypes(service=True, container=True, object=True),
            permission=AccountSasPermissions(read=True, write=True, list=True, delete=True, add=True, create=True, update=True, process=True),
            expiry=datetime.utcnow() + timedelta(hours=1)
        )
        return BlobServiceClient(
            account_url=f"https://{account_name}.blob.core.windows.net",
            credential=sas_token
        )

    def get_containers(self):
        """Return a list of metadata dictionaries, one per container."""
        return [
            {
                'name': container.name,
                'last_modified': container.last_modified,
                'etag': container.etag,
                # NOTE(review): this key is named 'lease_state' but holds the
                # container's lease object, matching the original behavior.
                'lease_state': container.lease,
                'has_immutability_policy': container.has_immutability_policy,
                'has_legal_hold': container.has_legal_hold,
                'metadata': container.metadata,
            }
            for container in self.blob_service_client.list_containers(include_metadata=True)
        ]

    def get_container(self, container_name: str):
        """
        Get a container from the blob storage
        """
        return self.blob_service_client.get_container_client(container_name)

    def create_container(self, container_name: str):
        """
        Create a container in the blob storage
        """
        return self.blob_service_client.create_container(container_name)

    def update_container(self):
        pass  # not yet implemented

    def delete_container(self):
        pass  # not yet implemented

    def get_blobs(self):
        pass  # not yet implemented

    def create_blob(self):
        pass  # not yet implemented

    def delete_blob(self):
        pass  # not yet implemented

    def get_folders(self):
        pass  # not yet implemented

    def create_folder(self, container_name: str, folder_name: str):
        """
        Create a file with a 0 as content. Because the file is created, the folder is also created. After that the file and the folder are created,
        delete the file so the folder will stay. According to the azure docs, it should be possible to create empty files, but this is not working.
        """
        # Split the SAS query string off the account URL and splice the
        # container and folder names in between.
        url_parts = self.blob_service_client.url.split('?')
        blob_url = f"{url_parts[0]}/{container_name}/{folder_name}/empty_file?{url_parts[1]}"
        blob = BlobClient.from_blob_url(blob_url=blob_url)
        # Upload a one-byte blob, then delete it so only the folder remains.
        response = blob.upload_blob(b"0", blob_type='AppendBlob')
        blob.delete_blob()
        return response

    def delete_folder(self):
        pass  # not yet implemented
import pandas as pd
from salure_helpers.salure_functions import SalureFunctions
class DatevMapping:
    """Static mapping helpers for DATEV payroll exports."""

    @staticmethod
    def map_iso2_codes_to_datev(data: pd.Series, default=None):
        """
        This function is meant to map ISO2 codes to the corresponding DATEV country code.
        :param data: input with ISO2 codes
        :param default: optional default value in case a key does not exist in the mapping. If left to None, will return original value
        :return: mapped values
        """
        # ISO 3166-1 alpha-2 -> DATEV numeric country code (as string).
        # NOTE(review): "KG" (Kyrgyzstan) and "LB" (Lebanon) both map to "450" --
        # verify against the official DATEV country table.
        mapping = {
            "AF": "423",
            "AL": "121",
            "DZ": "221",
            "AD": "123",
            "AO": "223",
            "AG": "320",
            "AR": "323",
            "AM": "422",
            "AU": "523",
            "AT": "151",
            "AZ": "425",
            "BS": "324",
            "BH": "424",
            "BD": "460",
            "BB": "322",
            "BY": "169",
            "BE": "124",
            "BZ": "330",
            "BJ": "229",
            "BT": "426",
            "BO": "326",
            "BA": "122",
            "BW": "227",
            "BR": "327",
            "IO": "595",
            "BN": "429",
            "BG": "125",
            "BF": "258",
            "BI": "291",
            "CV": "242",
            "KH": "446",
            "CM": "262",
            "CA": "348",
            "CF": "289",
            "TD": "284",
            "CL": "332",
            "CN": "479",
            "CO": "349",
            "KM": "244",
            "CD": "246",
            "CG": "245",
            "CR": "334",
            "CI": "231",
            "HR": "130",
            "CU": "351",
            "CY": "181",
            "CZ": "164",
            "DK": "126",
            "DJ": "230",
            "DM": "333",
            "DO": "335",
            "EC": "336",
            "EG": "287",
            "SV": "337",
            "GQ": "274",
            "ER": "224",
            "EE": "127",
            "SZ": "281",
            "ET": "225",
            "FJ": "526",
            "FI": "128",
            "FR": "129",
            "GA": "236",
            "GM": "237",
            "GE": "430",
            "DE": "000",
            "GH": "238",
            "GI": "195",
            "GR": "134",
            "GD": "340",
            "GT": "345",
            "GN": "261",
            "GW": "259",
            "GY": "328",
            "HT": "346",
            "VA": "167",
            "HN": "347",
            "HK": "411",
            "HU": "165",
            "IS": "136",
            "IN": "436",
            "ID": "437",
            "IR": "439",
            "IQ": "438",
            "IE": "135",
            "IL": "441",
            "IT": "137",
            "JM": "355",
            "JP": "442",
            "JO": "445",
            "KZ": "444",
            "KE": "243",
            "KI": "530",
            "KP": "434",
            "KR": "467",
            "KW": "448",
            "KG": "450",
            "LA": "449",
            "LV": "139",
            "LB": "450",
            "LS": "226",
            "LR": "247",
            "LY": "248",
            "LI": "141",
            "LT": "142",
            "LU": "143",
            "MO": "412",
            "MG": "249",
            "MW": "256",
            "MY": "482",
            "MV": "454",
            "ML": "251",
            "MT": "145",
            "MH": "544",
            "MR": "239",
            "MU": "253",
            "MX": "353",
            "FM": "545",
            "MD": "146",
            "MC": "147",
            "MN": "457",
            "ME": "140",
            "MA": "252",
            "MZ": "254",
            "MM": "427",
            "NA": "267",
            "NR": "531",
            "NP": "458",
            "NL": "148",
            "NZ": "536",
            "NI": "354",
            "NE": "255",
            "NG": "232",
            "MP": "525",
            "NO": "149",
            "OM": "456",
            "PK": "461",
            "PW": "537",
            "PS": "459",
            "PA": "357",
            "PG": "538",
            "PY": "359",
            "PE": "361",
            "PH": "462",
            "PL": "152",
            "PT": "153",
            "QA": "447",
            "MK": "144",
            "RO": "154",
            "RU": "160",
            "RW": "265",
            "KN": "370",
            "LC": "366",
            "VC": "369",
            "WS": "543",
            "SM": "156",
            "ST": "268",
            "SA": "472",
            "SN": "269",
            "RS": "170",
            "SC": "271",
            "SL": "272",
            "SG": "474",
            "SK": "155",
            "SI": "131",
            "SB": "524",
            "SO": "273",
            "ZA": "263",
            "SS": "278",
            "ES": "161",
            "LK": "431",
            "SD": "277",
            "SR": "364",
            "SE": "157",
            "CH": "158",
            "SY": "475",
            "TW": "465",
            "TJ": "470",
            "TZ": "282",
            "TH": "476",
            "TG": "283",
            "TO": "541",
            "TT": "371",
            "TN": "285",
            "TR": "163",
            "TM": "471",
            "TV": "540",
            "UG": "286",
            "UA": "166",
            "AE": "469",
            "GB": "168",
            "US": "368",
            "UY": "365",
            "UZ": "477",
            "VU": "532",
            "VE": "367",
            "VN": "432",
            "YE": "421",
            "ZM": "257",
            "ZW": "233"
        }
        return SalureFunctions.applymap(key=data, mapping=mapping, default=default)
import json
from typing import Union, List
import pandas as pd
import requests
from salure_helpers.salureconnect import SalureConnect
class Jira(SalureConnect):
    """
    SalureConnect wrapper around the Jira Cloud REST API (v3).
    """

    def __init__(self, label: Union[str, List], debug=False):
        """
        :param label: label of the SalureConnect credential for the Jira system
        :param debug: if True, request payloads and progress are printed
        """
        super().__init__()
        credentials = self.get_system_credential(system='jira', label=label)
        self.base_url = credentials['base_url']
        self.headers = {
            "Authorization": f"Basic {credentials['access_token']}",
            "Content-Type": "application/json"
        }
        self.debug = debug

    def get_issues(self, jql_filter: str = None, jira_filter_id: int = None, get_extra_fields: list = None, expand_fields: list = None) -> pd.DataFrame:
        """
        This method retrieves issues from Jira, paging through the results 100 at a time.
        :param jql_filter: optional filter in jql format
        :param jira_filter_id: optional filter id of predefined filter in jira
        :param get_extra_fields: an optional list of extra fields to retrieve
        :param expand_fields: an optional list of fields to expand
        :return: dataframe with issues
        """
        total_response = []
        got_all_results = False
        no_of_loops = 0
        while not got_all_results:
            query = {
                'startAt': f'{100 * no_of_loops}',
                'maxResults': '100',
                'fields': ["summary", "issuetype", "timetracking", "timespent", "description", "assignee", "project"],
                'fieldsByKeys': 'false'
            }
            if jql_filter is not None:
                query['jql'] = jql_filter
            if get_extra_fields is not None:
                query['fields'] += get_extra_fields
            if expand_fields is not None:
                query['expand'] = expand_fields
            # Bug fix: print the fully built query. Previously this happened before
            # the optional jql/fields/expand entries were added, so the debug output
            # was missing the filters actually sent.
            if self.debug:
                print(query)
            if jira_filter_id is not None:
                # NOTE(review): this endpoint path looks unusual; verify it against
                # the Jira REST API docs before relying on filter-id based search.
                url = f"{self.base_url}rest/api/3/search/jira/filter/{jira_filter_id}"
            else:
                url = f"{self.base_url}rest/api/3/search"
            response = requests.post(url=url, headers=self.headers, data=json.dumps(query))
            if response.status_code == 200:
                response_json = response.json()
                no_of_loops += 1
                # A full page of 100 issues means there may be more to fetch
                got_all_results = False if len(response_json['issues']) == 100 else True
                total_response += response_json['issues']
            else:
                raise ConnectionError(f"Error getting issues from Jira with message: {response.status_code, response.text}")
        if self.debug:
            print(f"Received {len(total_response)} issues from Jira")
        df = pd.json_normalize(total_response)
        return df

    def get_projects(self) -> pd.DataFrame:
        """
        This method retrieves projects from Jira, paging through the results 50 at a time.
        :return: a dataframe with projects
        """
        total_response = []
        got_all_results = False
        no_of_loops = 0
        while not got_all_results:
            query = {
                'startAt': f'{50 * no_of_loops}',
                'maxResults': '50',
                'expand': 'description'
            }
            if self.debug:
                print(query)
            response = requests.get(f"{self.base_url}rest/api/3/project/search", headers=self.headers, params=query)
            if response.status_code == 200:
                # (removed a dead response.raise_for_status() here: it can never
                # raise inside the status_code == 200 branch)
                response_json = response.json()
                no_of_loops += 1
                # A full page of 50 projects means there may be more to fetch
                got_all_results = False if len(response_json['values']) == 50 else True
                total_response += response_json['values']
            else:
                raise ConnectionError(f"Error getting projects from Jira with message: {response.status_code, response.text}")
        if self.debug:
            print(f"Received {len(total_response)} projects from Jira")
        df = pd.json_normalize(total_response)
        return df
from salure_helpers.salureconnect import SalureConnect
from typing import Union, List
import requests
import json
class Maxxton(SalureConnect):
    """
    SalureConnect wrapper for the Maxxton API.
    """

    def __init__(self, label: Union[str, List] = None, test_environment: bool = False, debug=False):
        super().__init__()
        # The test flag only switches the host; all endpoint paths are identical.
        self.base_url = 'https://api-test.maxxton.net/' if test_environment else 'https://api.maxxton.net/'
        creds = self.get_system_credential(system='maxxton', label=label)
        self.client_id = creds['client_id']
        self.client_secret = creds['client_secret']
        self.scope = creds['scope']
        self.headers = {
            'Content-Type': 'application/json',
            'Authorization': f'Bearer {self._get_maxxton_access_token()}'
        }

    def create_new_employee(self, data: dict) -> requests.Response:
        """
        Create a new employee in Maxxton.
        https://developers.maxxton.com/maxxton/v1/swagger/index.html#/Employee/createEmployees
        :param data: the employee payload
        :return: the raw API response
        """
        return requests.post(
            url=f'{self.base_url}maxxton/v1/employees',
            headers=self.headers,
            data=json.dumps(data)
        )

    def update_employee(self, employee_id: str, data: dict) -> requests.Response:
        """
        Update an existing employee in Maxxton.
        https://developers.maxxton.com/maxxton/v1/swagger/index.html#/Employee/updateEmployees
        :param employee_id: the id of the employee to update
        :param data: the employee payload
        :return: the raw API response
        """
        return requests.put(
            url=f'{self.base_url}maxxton/v1/employees/{employee_id}',
            headers=self.headers,
            data=json.dumps(data)
        )

    def _get_maxxton_access_token(self) -> str:
        """
        Authenticate with the client-credentials flow and return the access token.
        https://developers.maxxton.com/maxxton/v1/swagger/index.html#/Authentication/authenticate
        """
        response = requests.post(
            url=f'{self.base_url}maxxton/v1/authenticate',
            params={
                'grant_type': 'client_credentials',
                'client_id': self.client_id,
                'client_secret': self.client_secret,
                'scope': self.scope
            }
        )
        return response.json()['access_token']
import json
import os
import pandas as pd
import pymysql
import warnings
from datetime import datetime
from typing import Union, List, Optional
from salure_helpers.salureconnect import SalureConnect
class MySQL(SalureConnect):
    def __init__(self, label: Union[str, List] = None, return_queries: bool = False, debug=False):
        """
        This class is used for connecting to a mysql database.
        :param label: the label of the SalureConnect credential to use; when None, connection
                      details are read from environment variables instead
        :param return_queries: when True no connection is made and every method returns its
                               generated query as a string (query-generator mode for the
                               SalureConnect Agent)
        :param debug: if true, the queries will be printed
        """
        # This is built in so you can use this class as a query generator for the SalureConnect Agent
        super().__init__()
        self.debug = debug
        self.return_queries = return_queries
        if return_queries:
            print("Running in query return mode")
        else:
            if label is not None:
                credentials = self.get_system_credential(system='mysql', label=label)
                self.host = credentials['host']
                self.user = credentials['username']
                self.password = credentials['password']
                self.database = credentials['schema']
                self.port = 3306 if credentials['port'] is None else int(credentials['port'])
            else:
                # Fall back to environment variables; outside prod the staging host is used
                self.host = os.getenv("MYSQL_HOST") if self.environment == 'prod' else os.getenv("MYSQL_STAGING_HOST")
                self.user = os.getenv("MYSQL_USER")
                self.password = os.getenv("MYSQL_PASSWORD")
                self.database = os.getenv("MYSQL_DATABASE")
                self.port = 3306 if os.getenv("MYSQL_PORT") is None else int(os.getenv("MYSQL_PORT"))

    def _connect(self):
        """Open and return a new database connection with the configured credentials."""
        return pymysql.connect(host=self.host, user=self.user, password=self.password, database=self.database, port=self.port)

    def raw_query(self, query, insert=False) -> Optional[Union[list, str]]:
        """
        This method is used for sending queries to a mysql database.
        :param query: the query to send
        :param insert: if true, the query will be executed as an insert (committed, no result set)
        :return: the query string in return_queries mode, the fetched rows for selects, None for inserts
        """
        if self.debug:
            print(query)
        if self.return_queries:
            return query
        else:
            connection = self._connect()
            cursor = connection.cursor()
            cursor.execute(query)
            if insert:
                connection.commit()
                connection.close()
            else:
                data = cursor.fetchall()
                connection.close()
                return list(data)

    def update(self, table: str, columns: List, values: List, filter='') -> Optional[str]:
        """
        This method is used for updating a mysql database.
        :param table: the table to update
        :param columns: the columns to update
        :param values: the values to update (positionally matched with columns)
        :param filter: filter that selects the rows to update
        :return: message with the number of updated rows (or the query in return_queries mode)
        """
        update_values = ''

        def __map_strings(item):
            # Quote strings and datetimes for SQL; everything else is rendered as-is
            if isinstance(item, str):
                return '"' + str(item) + '"'
            elif isinstance(item, datetime):
                return '"' + item.strftime("%Y-%m-%d %H:%M:%S") + '"'
            else:
                return str(item)

        for index in range(len(columns)):
            if index != len(columns) - 1:
                update_values += "`{}` = {},".format(columns[index], __map_strings(values[index]))
            else:
                update_values += "`{}` = {}".format(columns[index], __map_strings(values[index]))
        # None values become DEFAULT so the column default applies
        update_values = update_values.replace('None', 'DEFAULT')
        query = "UPDATE {} SET {} {};".format(table, update_values, filter)
        if self.debug:
            print(query)
        if self.return_queries:
            return query
        connection = self._connect()
        cursor = connection.cursor()
        resp = cursor.execute(query)
        connection.commit()
        connection.close()
        return f"Updated {resp} rows in {table}"

    def select_metadata(self, table: str) -> List:
        """
        This method is used for getting the metadata of a table.
        :param table: the table to get the metadata from
        :return: the column names of the table, in ordinal position order
        """
        connection = self._connect()
        cursor = connection.cursor()
        cursor.arraysize = 1
        query = f"SELECT COLUMN_NAME FROM `information_schema`.`COLUMNS` WHERE `TABLE_NAME` = '{table}' AND `TABLE_SCHEMA` = '{self.database}' ORDER BY `ORDINAL_POSITION`"
        if self.debug:
            print(query)
        cursor.execute(query)
        data = cursor.fetchall()
        connection.close()
        # convert tuples to list
        data = [column[0] for column in data]
        return data

    def select(self, table: str, selection: str, filter='') -> Union[List, str]:
        """
        Run a SELECT and return the rows (or the query string in return_queries mode).
        :param table: table to select from
        :param selection: column expression, e.g. '*' or 'a, b'
        :param filter: optional WHERE/ORDER/LIMIT clause appended verbatim
        """
        query = f"SELECT {selection} FROM {table} {filter}"
        if self.debug:
            print(query)
        if self.return_queries:
            return query
        connection = self._connect()
        cursor = connection.cursor()
        cursor.arraysize = 10000
        cursor.execute(query)
        data = cursor.fetchall()
        connection.close()
        return list(data)

    def insert(self, table: str, dataframe: pd.DataFrame = None, ignore_duplicates=False, on_duplicate_key_update_columns: list = None, data: [pd.DataFrame, dict, list] = None, columns: list = None):
        """
        Insert data into a table. Data may be a DataFrame, a dict (column -> value) or a
        list of values (requires the columns parameter).
        :param table: target table
        :param dataframe: deprecated, use data instead
        :param ignore_duplicates: emit INSERT IGNORE
        :param on_duplicate_key_update_columns: columns to update on duplicate key
        :param data: the data to insert
        :param columns: column names, required when data is a list
        :return: message with the number of inserted rows (or the query in return_queries mode)
        """
        if dataframe is not None:
            data = dataframe
            warnings.warn("dataframe parameter is vervangen door data parameter", DeprecationWarning)

        def __map_strings(item):
            return '"' + item + '"' if isinstance(item, str) else str(item)

        if isinstance(data, dict):
            table_headers = ', '.join(data.keys())
            values = ','.join(map(__map_strings, data.values()))
        elif isinstance(data, pd.DataFrame):
            table_headers = ', '.join(list(data))
            # Replace NA datatypes with None, which can be understood by the db as null/default.
            # Bug fix: this previously tested pd.notnull(dataframe) - when the data was passed
            # through the `data` parameter (dataframe=None), pd.notnull(None) is False and the
            # entire frame was blanked out to None/DEFAULT.
            data = data.where(pd.notnull(data), None).copy()
            data = data.reset_index(drop=True)
            values = ','.join(str(index[1:]) for index in data.itertuples())
            values = values.replace('None', 'DEFAULT')
        elif isinstance(data, list):
            if columns is None:
                raise Exception('Columns parameter should be present when data is of type list')
            table_headers = ', '.join(columns)
            values = ','.join(map(__map_strings, data))
        # build the query, different scenario's and datatypes require different logic
        if ignore_duplicates:
            query = f"""INSERT IGNORE INTO {table} ({table_headers}) VALUES {values}""" if isinstance(data, pd.DataFrame) else f"""INSERT IGNORE INTO {table} ({table_headers}) VALUES ({values})"""
        elif on_duplicate_key_update_columns is not None:
            on_duplicate_key_update_columns = ', '.join([f'{column} = VALUES({column})' for column in on_duplicate_key_update_columns])
            query = f"""INSERT INTO {table} ({table_headers}) VALUES {values} ON DUPLICATE KEY UPDATE {on_duplicate_key_update_columns}""" if isinstance(data, pd.DataFrame) else f"""INSERT INTO {table} ({table_headers}) VALUES ({values}) ON DUPLICATE KEY UPDATE {on_duplicate_key_update_columns}"""
        else:
            query = f"""INSERT INTO {table} ({table_headers}) VALUES {values}""" if isinstance(data, pd.DataFrame) else f"""INSERT INTO {table} ({table_headers}) VALUES ({values})"""
        if self.debug:
            print(query)
        if self.return_queries:
            return query
        connection = self._connect()
        cursor = connection.cursor()
        resp = cursor.execute(query)
        connection.commit()
        connection.close()
        return f'Inserted {resp} rows into {table}'

    def delete(self, table, filter='') -> str:
        """
        Delete rows from a table. WARNING: an empty filter deletes all rows.
        :param table: target table
        :param filter: optional WHERE clause appended verbatim
        :return: message with the number of deleted rows (or the query in return_queries mode)
        """
        query = f"DELETE FROM {table} {filter}"
        if self.debug:
            print(query)
        if self.return_queries:
            return query
        connection = self._connect()
        cursor = connection.cursor()
        resp = cursor.execute(query)
        connection.commit()
        connection.close()
        return f'Deleted {resp} rows from {table}'

    def create_table_if_not_exists(self, table, dataframe):
        """
        Create a table whose columns match the given dataframe's columns and dtypes.
        :param table: name of the table to create
        :param dataframe: dataframe whose schema defines the table
        """
        # Map dataframe datatypes to mysql datatypes. First in set is dataframe type, second is mysql.
        datatypes = [
            {'dataframe_type': 'int64', 'mysql_type': 'INT'},
            {'dataframe_type': 'uint64', 'mysql_type': 'VARCHAR(255)'},
            {'dataframe_type': 'object', 'mysql_type': 'VARCHAR(255)'},
            {'dataframe_type': 'float64', 'mysql_type': 'FLOAT'},
            {'dataframe_type': 'datetime64[ns]', 'mysql_type': 'TIMESTAMP'},
            {'dataframe_type': 'bool', 'mysql_type': 'BOOLEAN'}
        ]
        datatypes = pd.DataFrame(datatypes)
        # Create a dataframe with all the types of the given dataframe
        dataframe_types = pd.DataFrame({'columns': dataframe.dtypes.index, 'types': dataframe.dtypes.values})
        dataframe_types = dataframe_types.to_json()
        dataframe_types = json.loads(dataframe_types)
        dataframe_types_columns = []
        dataframe_types_types = []
        for field in dataframe_types['columns']:
            dataframe_types_columns.append(dataframe_types['columns'][field])
        for type_key in dataframe_types['types']:
            dataframe_types_types.append(dataframe_types['types'][type_key]['name'])
        dataframe_types = pd.DataFrame({'columns': dataframe_types_columns, 'dataframe_type': dataframe_types_types})
        columns = pd.merge(dataframe_types, datatypes, on='dataframe_type', how='left')
        headers = ''
        for index, row in columns.iterrows():
            value = '`' + row['columns'] + '` ' + row['mysql_type']
            headers += ''.join(value) + ', '
        headers = headers[:-2]
        query = f"CREATE TABLE IF NOT EXISTS {table} ({headers});"
        if self.debug:
            print(query)
        if self.return_queries:
            return query
        connection = self._connect()
        cursor = connection.cursor()
        resp = cursor.execute(query)
        connection.commit()
        connection.close()
        return f'Updated {resp} new table in database'

    def drop_table(self, table) -> str:
        """
        Drop a table if it exists.
        :param table: name of the table to drop
        :return: confirmation message (or the query in return_queries mode)
        """
        query = f"DROP TABLE IF EXISTS {table}"
        if self.debug:
            print(query)
        if self.return_queries:
            return query
        connection = self._connect()
        cursor = connection.cursor()
        resp = cursor.execute(query)
        connection.commit()
        connection.close()
        return f'Dropped {resp} table from database'

    def ping(self):
        """Open a connection and ping the server; raises when the database is unreachable."""
        connection = self._connect()
        connection.ping()
import base64
import json
import time
import sys
import pandas as pd
from urllib import parse
import requests
from requests.adapters import HTTPAdapter
from salure_helpers.salureconnect import SalureConnect
from typing import Union, List
from tenacity import retry, stop_after_attempt, wait_exponential_jitter, retry_if_exception
def is_request_exception(e: BaseException) -> bool:
    """
    Retry predicate for tenacity: return True when *e* is a requests exception whose
    attached response has a 5xx status code or 408 (request timeout).

    Bug fix: a requests exception without a response (e.g. a connection error) has
    ``e.response is None``; the previous code crashed with AttributeError on
    ``None.status_code`` instead of evaluating the predicate.
    """
    if isinstance(e, requests.RequestException) and e.response is not None \
            and (e.response.status_code >= 500 or e.response.status_code == 408):
        # Strip quotes so the message is safe to embed in downstream logging
        error = str(e)[:400].replace('\'', '').replace('\"', '')
        print(f"{error}, retrying")
        return True
    else:
        return False
class GetConnector(SalureConnect):
    """
    Client for AFAS Profit GetConnectors (REST API). Handles authentication,
    paging (40.000 rows per request) and retrying of failed requests.
    """

    def __init__(self, label: Union[str, List], test_environment: bool = False, debug: bool = False):
        """
        :param label: label of the SalureConnect credential for the Profit system
        :param test_environment: when True, requests go to the accept (resttest) environment
        :param debug: if True, progress information is printed
        """
        super().__init__()
        if test_environment:
            self.base_url = 'resttest.afas.online'
        else:
            self.base_url = 'rest.afas.online'
        credentials = self.get_system_credential(system='profit', label=label, test_environment=test_environment)
        self.environment = credentials['environment']
        base64token = base64.b64encode(credentials['token'].encode('utf-8')).decode()
        self.session = requests.Session()
        self.session.headers.update({'Authorization': 'AfasToken ' + base64token,
                                     'IntegrationId': '38092_135680'})
        self.debug = debug

    def get_metadata(self, connector: str = None):
        """
        Retrieve metadata from Profit: all available GetConnectors when no connector
        name is given, otherwise the field definitions of the given connector.
        :param connector: optional connector name
        :return: list of connector descriptions, or list of field definitions
        """
        url = f"https://{self.environment}.{self.base_url}/profitrestservices/metainfo{f'/get/{connector}' if connector is not None else ''}"
        response = self.session.get(url=url)
        response = response.json()[f"{'getConnectors' if connector is None else 'fields'}"]
        return response

    def get_data(self, connector, fields=None, values=None, operators=None, orderbyfields=None):
        """
        Download all rows of a connector, paging 40.000 rows at a time.

        Possible filter operators:
        1: equals, 2: greater than or equal, 3: less than or equal, 4: greater than,
        5: less than, 6: text occurs in field (wrap value in %..%),
        7: not equal / text does not occur in field (wrap value in %..%),
        8: field is empty (filtervalue must always be null), 9: field is not empty,
        10: field starts with text (value%), 12: field does not start with text (value%),
        13: field ends with text (%value), 14: field does not end with text (%value).

        When using skip and take it is highly recommended to specify orderbyfields;
        this makes the requests much faster. Use unique fields or combinations of the
        most unique fields in the dataset. Using ';' between filters means OR, ',' means AND.
        :param connector: name of the connector
        :param fields: comma-separated filter field ids
        :param values: comma-separated filter values
        :param operators: comma-separated operator types (see list above)
        :param orderbyfields: field id(s) to order by (applied descending)
        :return: list of row dicts
        """
        total_response = []
        loop_boolean = True
        no_of_loops = 0
        no_of_results = 0
        if fields is not None:
            parameters = {"filterfieldids": fields, "filtervalues": values, "operatortypes": operators}
        else:
            parameters = {}
        if orderbyfields is not None:
            # The leading '-' sorts descending
            parameters["orderbyfieldids"] = "-{}".format(orderbyfields)
        url = 'https://{}.{}/profitrestservices/connectors/{}'.format(self.environment, self.base_url, connector)
        while loop_boolean:
            loop_parameters = {"skip": 40000 * no_of_loops, "take": 40000}
            parameters.update(loop_parameters)
            request = requests.Request('GET', url=url, params=parameters)
            prepared_request = self.session.prepare_request(request)
            response = self.do_request(prepared_request)
            response_json = response.json()['rows']
            no_of_loops += 1
            no_of_results += len(response_json)
            # A full page of 40.000 rows means there may be more data to fetch
            loop_boolean = True if len(response_json) == 40000 else False
            if self.debug:
                print(time.strftime('%H:%M:%S'), 'Got next connector from profit: ', connector, ' With nr of rows: ', no_of_results)
            total_response += response_json
        return total_response

    def get_complex_filtered_data(self, connector: str, fields: list, values: list, operators: list, orderbyfields: str = None):
        """
        This method is meant for complex combined filters like (a and b) or (a and c).

        Possible filter operators: see :meth:`get_data`.

        When using skip and take it is highly recommended to specify orderbyfields;
        this makes the requests much faster. Use unique fields or combinations of the
        most unique fields in the dataset. Using ';' between filters means OR, ',' means AND.
        :param connector: name of connector
        :param fields: list of filters. each listitem is one filterblock. example: ['naam, woonplaats', 'achternaam, einddatum']
        :param values: list of filters. each listitem corresponds to one filterblock. example: ['Jan, Gouda', 'Janssen, 2019-01-01T00:00']
        :param operators: list of filters. each listitem corresponds to one filterblock. example: ['1, 1', '1, 3']
        :param orderbyfields: string of fields to order result by
        :return: data in json format
        """
        total_response = []
        loop_boolean = True
        no_of_loops = 0
        no_of_results = 0
        # Build the Profit filterjson structure: each listitem becomes one OR-block
        # of AND-ed field filters.
        parameters = {"Filters": {"Filter": []}}
        for filter_no in range(0, len(fields)):
            filter = {"@FilterId": 'Filter {}'.format(filter_no + 1), "Field": []}
            fields_values = fields[filter_no].split(',')
            operators_values = operators[filter_no].split(',')
            values_values = values[filter_no].split(',')
            for number in range(0, len(fields_values)):
                filter["Field"].append({"@FieldId": fields_values[number],
                                        "@OperatorType": operators_values[number],
                                        "#text": values_values[number]})
            parameters['Filters']['Filter'].append(filter)
        url = 'https://{}.{}/profitrestservices/connectors/{}'.format(self.environment, self.base_url, connector)
        # eliminate whitespaces and escape special characters
        querystring = parse.quote(json.dumps(parameters, separators=(',', ':')))
        if orderbyfields is not None:
            querystring = querystring + '&orderbyfieldids={}'.format(orderbyfields)
        while loop_boolean:
            loop_parameters = "&skip={}&take={}".format(40000 * no_of_loops, 40000)
            request = requests.Request('GET', url=url, params="filterjson={}{}".format(querystring, loop_parameters))
            prepared_request = self.session.prepare_request(request)
            response = self.do_request(prepared_request)
            response_json = response.json()['rows']
            no_of_loops += 1
            no_of_results += len(response_json)
            # A full page of 40.000 rows means there may be more data to fetch
            loop_boolean = True if len(response_json) == 40000 else False
            if self.debug:
                print(time.strftime('%H:%M:%S'), 'Got next connector from profit: ', connector, ' With nr of rows: ', no_of_results)
            total_response += response_json
        return total_response

    def get_dossier_attachments(self, dossieritem_id, dossieritem_guid) -> requests.Response:
        """
        Fetch a dossier attachment. The response JSON contains base64-encoded binary
        data in the 'filedata' key. Process it by decoding and writing to a file:
            blob = base64.b64decode(response.json()['filedata'])
            with open(path, 'wb') as f:
                f.write(blob)
        :param dossieritem_id: dossieritem_id
        :param dossieritem_guid: dossieritem_guid
        :return: response object
        """
        url = f"https://{self.environment}.{self.base_url}/profitrestservices/subjectconnector/{dossieritem_id}/{dossieritem_guid}"
        request = requests.Request('GET', url=url)
        prepared_request = self.session.prepare_request(request)
        response = self.do_request(prepared_request)
        return response

    def download_file(self, file_id: str, filename: str, output_folder: str):
        """
        Download a file from AFAS and store it as <output_folder>/<filename>.
        :param file_id: the file_id of the file you want to download
        :param filename: the filename of the file you want to download
        :param output_folder: the folder where the file should be saved
        """
        # Bug fix: the filename parameter was previously ignored and a literal
        # placeholder string was used in both the request URL and the output path.
        url = f"https://{self.environment}.{self.base_url}/profitrestservices/fileconnector/{file_id}/{filename}"
        request = requests.Request('GET', url=url)
        prepared_request = self.session.prepare_request(request)
        response = self.do_request(prepared_request)
        with open(f"{output_folder}/{filename}", 'wb') as f:
            # Profit returns the file contents base64-encoded in the 'filedata' key
            blob = base64.b64decode(response.json()['filedata'])
            f.write(blob)

    def download_report(self, report_id: str, output_filepath: str):
        """
        Download a report from AFAS and store it at the given path.
        :param report_id: the ID of the report you want to download
        :param output_filepath: the full path (folder and filename) where the report is stored
        """
        url = f"https://{self.environment}.{self.base_url}/profitrestservices/reportconnector/{report_id}"
        request = requests.Request('GET', url=url)
        prepared_request = self.session.prepare_request(request)
        response = self.do_request(prepared_request)
        with open(f"{output_filepath}", 'wb') as f:
            # Profit returns the report contents base64-encoded in the 'filedata' key
            blob = base64.b64decode(response.json()['filedata'])
            f.write(blob)

    def convert_datatypes_to_afas_datatypes(self, data: list, connector: str) -> pd.DataFrame:
        """
        Takes in the response of get_data or get_complex_filtered_data and converts the
        datatypes of the columns to the datatype specified in the metadata of the connector.
        :param data: response of get_data or get_complex_filtered_data method
        :param connector: name of connector
        :return: pd.DataFrame with datatypes as given in the metadata of the connector
        """
        # Converts the data to a pandas dataframe and gets the metadata
        data = pd.DataFrame(data)
        meta_data = self.get_metadata(connector=connector)
        # mapping to convert the datatypes in the metadata to the correct pandas datatypes
        mapping = {
            "int": "Int64",
            "decimal": "float",
            "string": "str",
            "date": "datetime64[ns]",
            "boolean": "bool",
            "blob": "object"
        }
        # Checks if the column is in the metadata and if so adds it to the list of columns to convert
        columns_to_convert = [column for column in meta_data if column["id"] in data.columns]
        # Loops through the columns to convert and converts them to the correct datatype
        for column in columns_to_convert:
            new_type = mapping[column['dataType']]
            # Looks if the column is a datetime column and checks what the control type is to assign the correct type.
            # Control types are used in AFAS to determine the formatting: 4 is a date, 7 is a time and 8 is a datetime.
            if new_type == "datetime64[ns]":
                if column["controlType"] == 4:
                    data[column['id']] = data[column['id']].astype(new_type).dt.date
                elif column["controlType"] == 7:
                    data[column['id']] = data[column['id']].astype(new_type).dt.time
                else:
                    data[column['id']] = data[column['id']].astype(new_type)
            else:
                data[column['id']] = data[column['id']].astype(new_type)
        # returns the dataframe with the correct datatypes
        return data

    # this method should be used to execute all requests so retry and raising are handled in one place
    @retry(stop=stop_after_attempt(5), wait=wait_exponential_jitter(initial=60, max=900), retry=retry_if_exception(is_request_exception), reraise=True)
    def do_request(self, prepped_request: requests.PreparedRequest) -> requests.Response:
        """
        Send a prepared request with a long timeout; retried (by the decorator) on
        5xx/408 responses, and raising a ConnectionError for any >= 400 status.
        :param prepped_request: the prepared request to send
        :return: response object
        """
        adapter = HTTPAdapter()
        response = adapter.send(prepped_request, timeout=3000)
        if response.status_code >= 400:
            raise requests.exceptions.ConnectionError(f"Error occured: {response.status_code, response.text} while retrieving data for URL: {prepped_request.url}",
                                                      response=response, request=prepped_request)
        return response
import pandas as pd
from salure_helpers.salure_functions import SalureFunctions
# AFAS (Profit) nationality code -> ISO 3166-1 alpha-2 country code.
# ProfitDataCleaner uses this dict for the forward translation and inverts it
# ({v: k}) for the reverse one; since several AFAS codes share one ISO2 value
# (e.g. "USA"/"USA2" -> "US", "GB2".."GB4"/"IOT" -> "GB"), the inversion is
# lossy and keeps whichever AFAS key appears last here.
# NOTE(review): some target values are not valid ISO2 codes (e.g. "KO", "ZB",
# "YM", "EA", "OE", "CS") and some pairs look suspect (e.g. "IN" -> "AO") —
# presumably deliberate AFAS-specific values, but verify before relying on them.
mapping = {
    "AFG": "AF",
    "USA": "US",
    "USA2": "US",
    "AND": "AD",
    "IN": "AO",
    "RA": "AR",
    "AUS": "AU",
    "A": "AT",
    "BRN": "BH",
    "BDS": "BB",
    "B": "BE",
    "BH": "BZ",
    "DY": "BJ",
    "BOL": "BO",
    "RB": "BW",
    "GB3": "GB",
    "GB4": "GB",
    "IOT": "GB",
    "GB2": "GB",
    "BRU": "BN",
    "BU": "BF",
    "RU": "BI",
    "K": "KH",
    "TC": "CM",
    "CDN": "CA",
    "RCA": "CF",
    "RCH": "CL",
    "RCB": "CD",
    "C": "CU",
    "DJI": "DJ",
    "WD": "DM",
    "DOM": "DO",
    "ET": "EG",
    "EQ": "GQ",
    "ERI": "ER",
    "ETH": "ET",
    "FJI": "FJ",
    "FIN": "FI",
    "F": "FR",
    "WAG": "GM",
    "D": "DE",
    "WG": "GR",
    "GCA": "GT",
    "GUY": "GY",
    "RH": "HT",
    "HON": "HN",
    "H": "HU",
    "IND": "IN",
    "RI": "ID",
    "IRQ": "IQ",
    "IRL": "IE",
    "I": "IT",
    "JA": "JM",
    "J": "JP",
    "HKJ": "JO",
    "EAK": "KE",
    "KIR": "KI",
    "KWT": "KW",
    "KYR": "KG",
    "LAO": "LA",
    "RL": "LB",
    "LB": "LR",
    "LAR": "LY",
    "FL": "LI",
    "L": "LU",
    "MAL": "MY",
    "RMM": "ML",
    "M": "MT",
    "MAR": "MH",
    "RIM": "MR",
    "MS": "MU",
    "MEX": "MX",
    "MIC": "FM",
    "MON": "MN",
    "MNE": "ME",
    "MOC": "MZ",
    "BUR": "BU",
    "SWA": "NA",
    "NPL": "NP",
    "NIC": "NI",
    "RN": "NE",
    "WAN": "NG",
    "N": "NO",
    "OMA": "OM",
    "PAL": "PW",
    "PSE": "PS",
    "PNG": "PG",
    "RP": "PH",
    "P": "PT",
    "KG": "QA",
    "GRF": "GR",
    "RUS": "RU",
    "RWA": "RW",
    "WS": "AS",
    "RSM": "RS",
    "AS": "SA",
    "SRB": "CS",
    "WAL": "SL",
    "SGP": "SG",
    "SLO": "SI",
    "SP": "SB",
    "ROK": "KO",
    "ZSUD": "SD",
    "E": "ES",
    "CL": "LK",
    "499": "ZZ",
    "SUD": "SD",
    "SME": "SM",
    "S": "SE",
    "CH": "SH",
    "SYR": "SY",
    "TAD": "TA",
    "EAT": "EA",
    "T": "TH",
    "ZRE": "CD",
    "TLS": "TL",
    "TMN": "TM",
    "EAU": "EA",
    "000": "ZZ",
    "ROU": "RO",
    "OEZ": "OE",
    "YV": "VE",
    "WSM": "SM",
    "YMN": "YM",
    "Z": "ZB",
    "RM": "MG"
}
class ProfitDataCleaner:
    """Static helpers to translate AFAS (Profit) nationality codes to and from ISO2."""

    @staticmethod
    def map_nationality_codes_to_iso2(data: pd.Series, default=None):
        """
        Map AFAS nationality codes to the ISO2 equivalent code, the internationally accepted standard.
        :param data: input with AFAS nationality codes
        :param default: optional default value in case a key does not exist in the mapping. If left to None, will return original value
        :return: mapped values
        """
        return SalureFunctions.applymap(key=data, mapping=mapping, default=default)

    @staticmethod
    def map_iso2_codes_to_nationality_codes(data: pd.Series, default=None):
        """
        Map ISO2 country codes back to AFAS nationality codes (inverse of
        map_nationality_codes_to_iso2).

        NOTE: the inversion is lossy — several AFAS codes share one ISO2 code
        (e.g. 'USA' and 'USA2' both map to 'US'); for those, the AFAS code that
        appears last in the module-level mapping wins.
        :param data: input with ISO2 country codes
        :param default: optional default value in case a key does not exist in the mapping. If left to None, will return original value
        :return: mapped values
        """
        # Invert the module-level AFAS->ISO2 dict on every call; duplicates collapse to the last key.
        return SalureFunctions.applymap(key=data, mapping={v: k for k, v in mapping.items()}, default=default)
import urllib.parse
import warnings
import requests
import json
from typing import Union, List
from salure_helpers.salureconnect import SalureConnect
import pandas as pd
import os
class Salesforce(SalureConnect):
    """
    Simple wrapper around the Salesforce REST API.

    Authorize your application in SalureConnect first. The instance URL comes from
    the stored credential; the access token is refreshed through SalureConnect
    before every data call (see __get_headers).
    """

    def __init__(self, label: Union[str, List], debug: bool = False):
        super().__init__()
        # Look up the stored Salesforce credential by label in SalureConnect.
        self.credentials = self.get_system_credential(system='salesforce', label=label)
        self.credential_id = self.credentials['id']
        self.customer_url = self.credentials['auth']['instance_url']
        self.debug = debug
        self.api_version = 56.0

    def __get_headers(self) -> dict:
        """Refresh the access token and return the authorization headers for a request."""
        credentials = self.refresh_system_credential(system='salesforce', system_id=self.credential_id)
        headers = {"Authorization": f"Bearer {credentials['access_token']}",
                   "Content-Type": "application/json"}
        if self.debug:
            print(f"Headers: {headers}")
        return headers

    def query_data(self, query: str) -> pd.DataFrame:
        """
        Send a raw SOQL query to Salesforce and return all result pages as one DataFrame.
        :param query: query string. Something like: 'select+Name,Id+from+Account'
        :return: DataFrame with the query results
        :raises requests.HTTPError: when Salesforce returns an error status
        """
        params = {
            "q": query
        }
        if self.debug:
            print(f"Query: {query}")
        # Keep ':' and '+' unencoded; Salesforce expects them literally in the query string.
        params_str = urllib.parse.urlencode(params, safe=':+')
        df = pd.DataFrame()
        done = False
        url = f"{self.customer_url}/services/data/v37.0/query/?"
        # Follow 'nextRecordsUrl' until Salesforce reports the result set complete.
        while not done:
            response = requests.get(url=url, params=params_str, headers=self.__get_headers())
            response.raise_for_status()
            payload = response.json()
            done = payload['done']
            if not done:
                url = f"{self.customer_url}{payload['nextRecordsUrl']}"
            df = pd.concat([df, pd.DataFrame(payload['records'])])
        return df

    def get_data(self, fields: Union[str, List], object_name: str, filter: str = None) -> pd.DataFrame:
        """
        Query Salesforce in a somewhat user-friendly way.
        :param fields: fields you want to get (list or comma-separated string)
        :param object_name: table or object name that the fields need to be retrieved from
        :param filter: statement that evaluates to True or False
        :return: DataFrame with the query results
        """
        fields = ",".join(fields) if isinstance(fields, List) else fields
        # Delegate to query_data so pagination and error handling live in one place.
        return self.query_data(query=f"SELECT {fields} FROM {object_name}{' WHERE ' + filter if filter is not None else ''}")

    def create_contact(self, data: dict) -> json:
        """
        Create a Contact in Salesforce.
        :param data: fields you want to set; supported keys are in allowed_fields
        :return: parsed JSON response of the create call
        :raises requests.HTTPError: when Salesforce returns an error status
        """
        allowed_fields = {
            'salure_customer': 'Klant_van_Salure__c',
            # 'full_name': 'Name',
            'first_name': 'FirstName',
            'last_name': 'LastName',
            'phone': 'Phone',
            'email': 'Email',
            'salesforce_account_id': 'AccountId',
            'organisation_person_id': 'AFAS_persoons_ID__C'
        }
        required_fields = []
        self.__check_fields(data=data, required_fields=required_fields, allowed_fields=list(allowed_fields.keys()))
        # Translate the supported keys present in data to their Salesforce field names.
        body = {allowed_fields[field]: data[field] for field in (allowed_fields.keys() & data.keys())}
        body = json.dumps(body)
        if self.debug:
            print(f"Payload: {body}")
        response = requests.post(url=f"{self.customer_url}/services/data/v37.0/sobjects/Contact", data=body, headers=self.__get_headers())
        response.raise_for_status()
        if self.debug:
            print(f"Response: {response.content, response.text}")
        return response.json()

    def update_contact(self, data: dict):
        """
        Update an existing Contact in Salesforce.
        :param data: fields you want to update; 'contact_id' is required, other supported keys are in allowed_fields
        :return: nothing is returned when update is successful, otherwise raises error
        :raises requests.HTTPError: when Salesforce returns an error status
        """
        allowed_fields = {
            'salure_customer': 'Klant_van_Salure__c',
            # 'full_name': 'Name',
            'first_name': 'FirstName',
            'last_name': 'LastName',
            'phone': 'Phone',
            'email': 'Email',
            'salesforce_account_id': 'AccountId',
            'organisation_person_id': 'AFAS_persoons_ID__C'
        }
        required_fields = ['contact_id']
        self.__check_fields(data=data, required_fields=required_fields, allowed_fields=list(allowed_fields.keys()))
        # Translate the supported keys present in data to their Salesforce field names.
        body = {allowed_fields[field]: data[field] for field in (allowed_fields.keys() & data.keys())}
        body = json.dumps(body)
        if self.debug:
            print(f"Payload: {body}")
        response = requests.patch(url=f"{self.customer_url}/services/data/v37.0/sobjects/Contact/{data['contact_id']}", data=body, headers=self.__get_headers())
        response.raise_for_status()
        if self.debug:
            print(f"Response: {response.content, response.text}")

    @staticmethod
    def __check_fields(data: Union[dict, List], required_fields: List, allowed_fields: List):
        """Warn about unsupported fields and raise ValueError when a required field is missing."""
        if isinstance(data, dict):
            data = data.keys()
        for field in data:
            if field not in allowed_fields and field not in required_fields:
                warnings.warn('Field {field} is not implemented. Optional fields are: {allowed_fields}'.format(field=field, allowed_fields=tuple(allowed_fields)))
        for field in required_fields:
            if field not in data:
                raise ValueError('Field {field} is required. Required fields are: {required_fields}'.format(field=field, required_fields=tuple(required_fields)))

    def query_table_metadata(self, table: str) -> requests.Response:
        """
        Get the metadata of a table in Salesforce.
        :param table: table or object name that the fields need to be retrieved from
        :return: raw response of the describe call
        """
        url = f"{self.customer_url}/services/data/v{self.api_version}/sobjects/{table}/describe/"
        response = requests.get(url, headers=self.__get_headers())
        return response

    def query_table(self, data_dir: str, table: str, fields: Union[str, List], filter: str = None, filename: str = None) -> pd.DataFrame:
        """
        for information about the tables, see: https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_query.htm
        Retrieve a table from Salesforce and keep a local feather cache up to date. Tables in
        approved_tables can be loaded incrementally (only records changed in the last 7 days are
        fetched and merged into the cache; deleted records are removed); any other table always
        gets a full load.
        :param data_dir: directory where the data will be stored. Both the full and incremental data will be stored here
        :param table: table (it's a SQL query) you want to retrieve data from. If you call a table which is not in the approved tables, you will always get the full (not incremental) dataset.
        :param fields: fields you want to get from the table
        :param filter: possible filter you want to apply to the table
        :param filename: filename you want to use for the data. If not given, the table will be used as filename
        :return: the dataset in pandas format
        """
        approved_tables = {
            'Account': 'incremental',
            'AccountHistory': 'full',
            'Appliaction__c': 'incremental',
            'Beneficiary__c': 'incremental',
            'Campaign': 'incremental',
            'CampaignMember': 'incremental',
            'Case': 'incremental',
            'Contact': 'incremental',
            'cpm__Installment__c': 'incremental',
            'cpm__Payment__c': 'incremental',
            'Document__c': 'incremental',
            'Donaction_contracts__c': 'incremental',
            'Donor_Type_Budget__c': 'incremental',
            'Dorcas_Exchange_Rates__c': 'incremental',
            'Dorcas_Report__c': 'incremental',
            'General_Ledger_Account__c': 'incremental',
            'Lead': 'incremental',
            'npe03__Recurring_Donation__c': 'incremental',
            'npsp__General_Accounting_Unit__c': 'incremental',
            'Opportunity': 'incremental',
            'pmnc__Project__c': 'incremental',
            'Project_Budget__c': 'incremental',
            'Project_Budget_Line__c': 'incremental',
            'Project_Expense__c': 'incremental',
            'Project_Indicator__c': 'incremental',
            'Project_Result__c': 'incremental',
            'Reporting_Unit__c': 'incremental',
            'Result_Framework__c': 'incremental',
            'Stakeholder__c': 'incremental',
            'Volunteer_Assignment__c': 'incremental',
            'User': 'full'
        }
        # Unknown tables always get a full (non-incremental) load.
        if table not in approved_tables.keys():
            approved_tables[table] = 'full'
        # Create folders for the raw feather files
        os.makedirs(data_dir, exist_ok=True)
        os.makedirs(f'{data_dir}/cache/', exist_ok=True)
        filename = table if filename is None else filename
        # One cache path used for both reading and writing (the write path previously lacked
        # the '/' separator, so the cache was never found again when data_dir had no trailing slash).
        cache_path = f'{data_dir}/cache/{filename}.ftr'
        load_type = approved_tables[table]
        # No cache file yet means this is the first, and thus full, load.
        initial_load = not os.path.exists(cache_path)
        # Copy the caller's list so extending with metadata fields does not leak back to the caller.
        fields = fields.split(',') if isinstance(fields, str) else list(fields)
        # Metadata fields needed for deduplication; set() removes duplicates afterwards.
        if load_type == 'incremental':
            fields.extend(['Id', 'CreatedDate', 'LastModifiedDate'])
        else:
            fields.extend(['Id'])
        fields = ','.join(list(set(fields)))
        # Incremental refresh: only records created/updated in the last 7 days (duplicates are removed later).
        if initial_load is False and load_type == 'incremental':
            params = {"q": f"SELECT {fields} FROM {table} WHERE LastModifiedDate >= LAST_N_DAYS:7 {'' if filter is None or filter == '*' else ' AND ' + filter }"}
        # In all other cases load the full dataset.
        else:
            params = {"q": f"SELECT {fields} FROM {table} {'' if filter is None or filter == '*' else ' WHERE ' + filter }"}
        params_str = urllib.parse.urlencode(params, safe=':+')
        url = f'{self.customer_url}/services/data/v{self.api_version}/query/?'
        done = False
        df = pd.DataFrame()
        # Page through the results following 'nextRecordsUrl'.
        while not done:
            response = requests.get(url=url, params=params_str, headers=self.__get_headers())
            response.raise_for_status()
            done = response.json()['done']
            df_temp = pd.DataFrame(response.json()['records'])
            if 'attributes' in df_temp.columns:
                del df_temp['attributes']
            if not done:
                url = f"{self.customer_url}{response.json()['nextRecordsUrl']}"
            df = pd.concat([df_temp, df])
        if load_type == 'incremental':
            # Merge with the previously cached data, keeping only the newest version of each record.
            if os.path.exists(cache_path):
                df_old = pd.read_feather(cache_path)
                df = pd.concat([df, df_old])
                df.sort_values(by=['Id', 'LastModifiedDate'], ascending=False, inplace=True)
                df = df.drop_duplicates(subset=['Id'], keep='first')
            # Fetch ALL deleted record ids (not only recent ones: very old rows can have been
            # deleted since the last sync) and remove them from the dataset.
            params = {"q": f"SELECT+Id,isDeleted+FROM+{table}+WHERE+isDeleted+=TRUE"}
            params_str = urllib.parse.urlencode(params, safe=':+')
            done = False
            df_del = pd.DataFrame()
            url = f'{self.customer_url}/services/data/v{self.api_version}/queryAll/?'
            while not done:
                response = requests.get(url=url, params=params_str, headers=self.__get_headers())
                response.raise_for_status()
                done = response.json()['done']
                df_temp = pd.DataFrame(response.json()['records'])
                if not done:
                    url = f"{self.customer_url}{response.json()['nextRecordsUrl']}"
                df_del = pd.concat([df_temp, df_del])
            # Join the deleted rows to the dataframe and filter them out.
            if len(df_del) > 0:
                del df_del['attributes']
                df = df.merge(df_del, how='left', on='Id')
                df = df[df['IsDeleted'].isna()].copy()
                del df['IsDeleted']
        # Save the final result to the cache as a feather file.
        if 'attributes' in df.columns:
            del df['attributes']
        df.reset_index(drop=True, inplace=True)
        try:
            df.to_feather(cache_path)
        except Exception:
            # Mixed-type columns cannot be written to feather; fall back to string dtype.
            df = df.astype(str)
            df.to_feather(cache_path, compression='lz4')
        return df
from salure_helpers.sap.base_functions import BaseFunctions
import requests
import pandas as pd
class GetEndpoints:
    """GET side of the SAP connector; BaseFunctions performs the actual HTTP calls."""

    def __init__(self, label: str, data_dir: str, certificate_file: str = None, key_file: str = None, debug: bool = False):
        self.base_class = BaseFunctions(label=label, data_dir=data_dir, certificate_file=certificate_file, key_file=key_file, debug=debug)
        self.data_dir = data_dir
        self.debug = debug

    def get_batch_data(self, uri: str, filter: str, id_key: str, id_list: list, batch_size: int = 10, xml_root: str = None):
        """
        Combine many calls to an endpoint into one dataframe. Useful when a lot of data is
        needed from an endpoint, since SAP cannot do this itself.
        :param uri: The URI you want to get the data from
        :param filter: The filter you want to use to filter the data on
        :param id_key: The key for all the ID's you want to get the data from. i.e. 'employee_id'
        :param id_list: A list of all the ID's you want to get the data from. i.e. ['123456', '654321']
        :param batch_size: the number of ID's you want to get the data from in one call. by default 10
        :param xml_root: the response from SAP comes within XML format. Give the root of the XML file from which you want to get the data
        :return: a Pandas dataframe with the data from the endpoint
        """
        # Split the given ID's into batches of batch_size.
        id_batches = [id_list[i:i + batch_size] for i in range(0, len(id_list), batch_size)]
        df = pd.DataFrame()
        processed = 0
        for i, id_batch in enumerate(id_batches):
            # Build an OR-filter over every id in this batch, combined with the caller's filter.
            id_filter = ''
            for record_id in id_batch:
                id_filter += f"{id_key} eq '{record_id}' or "
            # [:-4] strips the trailing " or ".
            final_filter = f"({id_filter[:-4]}) and {filter}"
            # Call the simple get_data endpoint for this batch; a failing batch is logged and skipped.
            try:
                df_tmp = self.base_class.get_data(uri=uri, xml_root=xml_root, filter=final_filter)
                df = pd.concat([df, df_tmp], axis=0)
                df.reset_index(drop=True, inplace=True)
            except Exception as e:
                print(f"Error in batch {i}: {e}")
            # Count the actual batch length so the final (possibly partial) batch is not over-reported.
            processed += len(id_batch)
            print(f'Processed {processed} records from {len(id_list)}')
        return df
from salure_helpers.sap.base_functions import BaseFunctions
import requests
import warnings
import json
from typing import Union, List
class PostEndpoints:
def __init__(self, label: str, data_dir: str, certificate_file: str = None, key_file: str = None, debug: bool = False):
self.base_class = BaseFunctions(label=label, data_dir=data_dir, certificate_file=certificate_file, key_file=key_file, debug=debug)
self.data_dir = data_dir
self.debug = debug
@staticmethod
def __check_fields(data: Union[dict, List], required_fields: List, allowed_fields: List):
if isinstance(data, dict):
data = data.keys()
for field in data:
if field not in allowed_fields and field not in required_fields:
warnings.warn('Field {field} is not implemented. Optional fields are: {allowed_fields}'.format(field=field, allowed_fields=tuple(allowed_fields)))
for field in required_fields:
if field not in data:
raise ValueError('Field {field} is required. Required fields are: {required_fields}'.format(field=field, required_fields=tuple(required_fields)))
def post_master_action(self, data: dict, overload_fields: dict = None):
"""
Upload the new employee to SAP through MasterAction
:param data: Fields that are allowed are listed in allowed fields array. Update this whenever necessary
:return: status code for request and optional error message
"""
allowed_fields = ["external_employee_subgroup"]
required_fields = ["afas_employee_id", "sap_employee_id", "start_date", "end_date", "action",
"reason", "employment_status", "company_code", "personal_area", "personal_sub_area",
"employee_group", "employee_sub_group", "sap_org_unit_id", "position_id", "cost_center", "salutation",
"last_name", "first_name", "prefix", "second_name_prefix", "initials",
"other_title", "date_of_birth", "language", "nationality", "title", "gender"]
self.__check_fields(data=data, required_fields=required_fields, allowed_fields=allowed_fields)
base_body = {
"Afasemployeenumber": data["afas_employee_id"],
"Employeenumber": "00000000" if data['sap_employee_id'] is None or data['sap_employee_id'] == '' else data['sap_employee_id'],
"Startdate": data["start_date"],
"Enddate": data["end_date"],
"Actiontype": data["action"],
"Reasonforaction": data["reason"],
"Employmentstatus": data["employment_status"],
"Companycode": data["company_code"],
"Personnelarea": data["personal_area"],
"Personnelsubarea": data["personal_sub_area"],
"Employeegroup": data["employee_group"],
"Employeesubgroup": data["employee_sub_group"],
"OrgunitID": data["sap_org_unit_id"],
"PositionID": data["position_id"],
"Costcenter": data["cost_center"],
"Salutation": data["salutation"],
"Lastname": data["last_name"],
"Firstname": data["first_name"],
"Nameprefix": data["prefix"],
"Secondnameprefix": data["second_name_prefix"],
"NameatBirth": data["last_name"],
"Initials": data["initials"],
"Othertitle": data["other_title"],
"Dateofbirth": data["date_of_birth"],
"Communicationlanguage": data["language"],
"Nationality": data["nationality"],
"Title": data["title"],
"Gender": data["gender"],
"ExternalEmployeesubgroup": data['external_employee_subgroup']
}
# Update the request body with update fields
response = self.base_class.post_data(uri='MasterActionPost/*', data=base_body, return_key='Employeenumber')
return response
def post_personal_data(self, data: dict, overload_fields: dict = None):
"""
Upload the employee personal data
:param data: Fields that are allowed are listed in allowed fields array. Update this whenever necessary
:return: status code for request and optional error message
"""
allowed_fields = ['last_name', 'first_name', 'name_prefix', 'second_name_prefix', 'middle_name', 'middle_name', 'initials', 'second_title',
'date_of_birth', 'language', 'nationality', 'title', 'gender', 'name_at_birth']
required_fields = ['afas_employee_id', 'sap_employee_id', 'start_date', 'end_date']
self.__check_fields(data=data, required_fields=required_fields, allowed_fields=allowed_fields)
base_body = {
"Afasemployeenumber": data["afas_employee_id"],
"Employeenumber": data["sap_employee_id"],
"Startdate": data["start_date"],
"Enddate": data["end_date"]
}
fields_to_update = {}
# Add fields that you want to update a dict (adding to body itself is too much text)
fields_to_update.update({"Lastname": data['last_name']}) if 'last_name' in data else fields_to_update
fields_to_update.update({"Firstname": data['first_name']}) if 'first_name' in data else fields_to_update
fields_to_update.update({"Nameprefix": data['name_prefix']}) if 'name_prefix' in data else fields_to_update
fields_to_update.update({"NameatBirth": data['name_at_birth']}) if 'name_at_birth' in data else fields_to_update
fields_to_update.update({"Secondnameprefix": data['second_name_prefix']}) if 'second_name_prefix' in data else fields_to_update
fields_to_update.update({"Middlename": data['middle_name']}) if 'middle_name' in data else fields_to_update
fields_to_update.update({"Initials": data['initials']}) if 'initials' in data else fields_to_update
fields_to_update.update({"Salutation": data['salutation']}) if 'salutation' in data else fields_to_update
fields_to_update.update({"Othertitle": data['second_title']}) if 'second_title' in data else fields_to_update
fields_to_update.update({"Dateofbirth": data['date_of_birth']}) if 'date_of_birth' in data else fields_to_update
fields_to_update.update({"Communicationlanguage": data['language']}) if 'language' in data else fields_to_update
fields_to_update.update({"Nationality": data['nationality']}) if 'nationality' in data else fields_to_update
fields_to_update.update({"Title": data['title']}) if 'title' in data else fields_to_update
fields_to_update.update({"Gender": data['gender']}) if 'gender' in data else fields_to_update
fields_to_update.update(overload_fields) if overload_fields is not None else ''
# Update the request body with update fields
base_body.update(fields_to_update)
response = self.base_class.post_data(uri='PersonalDataPost/*', data=base_body, return_key=None)
return response
def post_communication(self, data: dict, overload_fields: dict = None):
"""
Post communication data to SAP like email or KID
:param data: Fields that are allowed are listed in allowed fields array. Update this whenever necessary
:param overload_fields: Give the guid and value from a free field if wanted
:return: status code for request and optional error message
"""
allowed_fields = ['user_id', 'user_id_long']
required_fields = ['afas_employee_id', 'sap_employee_id', 'start_date', 'end_date', 'user_type']
self.__check_fields(data=data, required_fields=required_fields, allowed_fields=allowed_fields)
base_body = {
"Afasemployeenumber": data["afas_employee_id"],
"Employeenumber": data["sap_employee_id"],
"Startdate": data["start_date"],
"Enddate": data["end_date"],
"Usertype": data["user_type"]
}
fields_to_update = {}
# Add fields that you want to update a dict (adding to body itself is too much text)
fields_to_update.update({"UserId": data['user_id']}) if 'user_id' in data else fields_to_update
fields_to_update.update({"UserIdLong": data['user_id_long']}) if 'user_id_long' in data else fields_to_update
fields_to_update.update(overload_fields) if overload_fields is not None else ''
# Update the request body with update fields
base_body.update(fields_to_update)
response = self.base_class.post_data(uri='CommunicationPost/*', data=base_body, return_key='UserId')
return response
def post_organisational_unit(self, data: dict, overload_fields: dict = None):
"""
Post OrgUnits to SAP
:param data: Fields that are allowed are listed in allowed fields array. Update this whenever necessary
:param overload_fields: Give the guid and value from a free field if wanted
:return: status code for request and optional error message
"""
allowed_fields = ['sap_organisational_unit_id', 'language']
required_fields = ['start_date', 'end_date', 'organisational_unit_id', 'organisational_unit', 'parent_organisational_unit_id']
self.__check_fields(data=data, required_fields=required_fields, allowed_fields=allowed_fields)
base_body = {
"OrgUnitID": "00000000" if data['sap_organisational_unit_id'] is None else data['sap_organisational_unit_id'], # New organisational unit will have 00000000 as the OrgUnitID to indicate Creating new ones
"Startdate": data["start_date"],
"Enddate": data["end_date"],
"Shorttext": data["organisational_unit_id"],
"Longtext": data["organisational_unit"] if len(data['organisational_unit'])<=40 else data['organisational_unit'][:40], # SAP has a limit of 40 characters for longtext
"OrgunitIDassigend": data["parent_organisational_unit_id"]
}
fields_to_update = {}
# Add fields that you want to update a dict (adding to body itself is too much text)
fields_to_update.update({"Langu": data['language']}) if 'language' in data else fields_to_update
fields_to_update.update(overload_fields) if overload_fields is not None else ''
# Update the request body with update fields
base_body.update(fields_to_update)
response = self.base_class.post_data(uri='OrgUnitPost/*', data=base_body, return_key='OrgUnitID')
return response
def post_position(self, data: dict, overload_fields: dict = None):
"""
Post Position to SAP
:param data: Fields that are allowed are listed in allowed fields array. Update this whenever necessary
:param overload_fields: Give the guid and value from a free field if wanted
:return: status code for request and optional error message
"""
allowed_fields = ['sap_position_id', 'language', 'cost_center', 'is_manager']
required_fields = ['start_date', 'end_date', 'job_code', 'job', 'sap_organisational_unit_id']
self.__check_fields(data=data, required_fields=required_fields, allowed_fields=allowed_fields)
base_body = {
"PositionID": "00000000" if data['sap_position_id'] is None or data['sap_position_id'] == '' else data['sap_position_id'],
"Startdate": data['start_date'],
"Enddate": data['end_date'],
"Shorttext": data['job_code'],
"Longtext": data['job'] if len(data['job'])<=40 else data['job'][:40], # SAP has a limit of 40 characters for longtext
"Omleader": False if data['is_manager'] is None or data['is_manager'] == '' else data['is_manager'],
"OrgunitIDassigend": data['sap_organisational_unit_id']
}
# Add fields that you want to update a dict (adding to body itself is too much text)
fields_to_update = {}
fields_to_update.update({"Langu": data['language']}) if 'language' in data else fields_to_update
fields_to_update.update({"Costcenter": data['cost_center']}) if 'cost_center' in data else fields_to_update
fields_to_update.update(overload_fields) if overload_fields is not None else ''
# Update the request body with update fields
base_body.update(fields_to_update)
response = self.base_class.post_data(uri='PositionPost/*', data=base_body, return_key='PositionID')
return response
def post_workcenter(self, data: dict, overload_fields: dict = None):
"""
Post Workcenters to SAP, assign to an existing position
:param data: Fields that are allowed are listed in allowed fields array. Update this whenever necessary
:param overload_fields: Give the guid and value from a free field if wanted
:return: status code for request and optional error message
"""
allowed_fields = []
required_fields = ['workcenter_id', 'start_date', 'end_date', 'sap_position_id']
self.__check_fields(data=data, required_fields=required_fields, allowed_fields=allowed_fields)
base_body = {
"WorkcenterID": data['workcenter_id'],
"Startdate": data['start_date'],
"Enddate": data['end_date'],
"PositionID": data['sap_position_id'],
}
# Add fields that you want to update a dict (adding to body itself is too much text)
fields_to_update = {}
fields_to_update.update(overload_fields) if overload_fields is not None else ''
# Update the request body with update fields
base_body.update(fields_to_update)
response = self.base_class.post_data(uri='WorkcenterPost/*', data=base_body, return_key=None)
return response
def post_contract(self, data: dict, overload_fields: dict = None):
"""
Post Contracts to SAP
:param data: Fields that are allowed are listed in allowed fields array. Update this whenever necessary
:param overload_fields: Give the guid and value from a free field if wanted
:return: status code for request and optional error message
"""
allowed_fields = ['entry_group_date']
required_fields = ['afas_employee_id', 'sap_employee_id', 'start_date', 'end_date',
'contract_type', 'date_in_service', 'valid_until_date']
self.__check_fields(data=data, required_fields=required_fields, allowed_fields=allowed_fields)
base_body = {
"Afasemployeenumber": data['afas_employee_id'],
"Employeenumber": "00000000" if data['sap_employee_id'] is None or data['sap_employee_id'] == '' else data['sap_employee_id'],
"Startdate": data['start_date'],
"Enddate": data['end_date'],
"ContractType": data['contract_type'],
"InitialEntryDate": data['date_in_service'],
"VaildUntilDate": data['valid_until_date']
}
# Add fields that you want to update a dict (adding to body itself is too much text)
fields_to_update = {}
fields_to_update.update({"EntryGroupDate": data['entry_group_date']}) if 'entry_group_date' in data else fields_to_update
fields_to_update.update(overload_fields) if overload_fields is not None else ''
# Update the request body with update fields
base_body.update(fields_to_update)
response = self.base_class.post_data(uri='ContractElementPost/*', data=base_body, return_key=None)
return response
def post_additional_contract_element(self, data: dict, overload_fields: dict = None):
"""
Post Contracts to SAP
:param data: Fields that are allowed are listed in allowed fields array. Update this whenever necessary
:param overload_fields: Give the guid and value from a free field if wanted
:return: status code for request and optional error message
"""
allowed_fields = ['leading_level']
required_fields = ['afas_employee_id', 'sap_employee_id', 'start_date', 'end_date',
'start_date_leading_level', 'end_date_leading_level']
self.__check_fields(data=data, required_fields=required_fields, allowed_fields=allowed_fields)
base_body = {
"Afasemployeenumber": data['afas_employee_id'],
"Employeenumber": "00000000" if data['sap_employee_id'] is None or data['sap_employee_id'] == '' else data['sap_employee_id'],
"Startdate": data['start_date'],
"Enddate": data['end_date'],
"LeadingLevelStartdate": data['start_date_leading_level'],
"LeadingLevelEnddate": data['end_date_leading_level']
}
# Add fields that you want to update a dict (adding to body itself is too much text)
fields_to_update = {}
fields_to_update.update({"LeadingLevel": data['leading_level']}) if 'leading_level' in data else fields_to_update
fields_to_update.update(overload_fields) if overload_fields is not None else ''
# Update the request body with update fields
base_body.update(fields_to_update)
response = self.base_class.post_data(uri='AdditionalContractElementsPost/*', data=base_body, return_key=None)
return response
def post_basic_pay(self, data: dict, overload_fields: dict = None):
    """
    Post Basic Pay data, like capacity level and monthly hours, to SAP.
    :param data: dict with the field values. Required keys: afas_employee_id, sap_employee_id,
                 start_date, end_date, hours_per_month, parttime_percentage. No optional keys.
    :param overload_fields: optional dict of free-field values ({guid: value}) merged into the request body
    :return: response of the POST request (status code and optional error message)
    """
    allowed_fields = []
    required_fields = ['afas_employee_id', 'sap_employee_id', 'start_date', 'end_date',
                       'hours_per_month', 'parttime_percentage']
    # Raises when required fields are missing or unknown fields are supplied
    self.__check_fields(data=data, required_fields=required_fields, allowed_fields=allowed_fields)
    base_body = {
        "Afasemployeenumber": data['afas_employee_id'],
        # SAP expects the placeholder '00000000' when the employee number is not (yet) known
        "Employeenumber": "00000000" if data['sap_employee_id'] is None or data['sap_employee_id'] == '' else data['sap_employee_id'],
        "Startdate": data['start_date'],
        "Enddate": data['end_date'],
        "WorkingHours": data['hours_per_month'],
        "CapUtilLvl": data['parttime_percentage']
    }
    # Merge the overload (free) fields into the request body.
    # (Fixed: plain if-statement instead of a conditional expression used only for its side effect.)
    if overload_fields is not None:
        base_body.update(overload_fields)
    response = self.base_class.post_data(uri='BasicPaysPost/*', data=base_body, return_key=None)
    return response
def post_matrix_manager(self, data: dict, overload_fields: dict = None):
    """
    Post a matrix manager assignment to SAP, linking a managing position to an existing position.
    :param data: dict with the field values. Required keys: matrix_manager_position_id,
                 start_date, end_date, sap_position_id. No optional keys.
    :param overload_fields: optional dict of free-field values ({guid: value}) merged into the request body
    :return: response of the POST request (status code and optional error message)
    """
    allowed_fields = []
    required_fields = ['matrix_manager_position_id', 'start_date', 'end_date', 'sap_position_id']
    # Raises when required fields are missing or unknown fields are supplied
    self.__check_fields(data=data, required_fields=required_fields, allowed_fields=allowed_fields)
    base_body = {
        "ManagePositionID": data['matrix_manager_position_id'],
        "Startdate": data['start_date'],
        "Enddate": data['end_date'],
        "PositionID": data['sap_position_id'],
    }
    # Merge the overload (free) fields into the request body.
    # (Fixed: plain if-statement instead of a conditional expression used only for its side effect.)
    if overload_fields is not None:
        base_body.update(overload_fields)
    response = self.base_class.post_data(uri='MatrixManagerPost/*', data=base_body, return_key=None)
    return response
def post_power_of_attorney(self, data: dict, overload_fields: dict = None):
    """
    Post a Power of Attorney record to SAP.
    :param data: dict with the field values. Required keys: afas_employee_id, start_date,
                 end_date, power_of_attorney_code, company_code, value_limit, currency.
                 No optional keys.
    :param overload_fields: optional dict of free-field values ({guid: value}) merged into the request body
    :return: response of the POST request (status code and optional error message)
    """
    allowed_fields = []
    required_fields = ['afas_employee_id', 'start_date', 'end_date', 'power_of_attorney_code',
                       'company_code', 'value_limit', 'currency']
    # Raises when required fields are missing or unknown fields are supplied
    self.__check_fields(data=data, required_fields=required_fields, allowed_fields=allowed_fields)
    base_body = {
        "Afasemployeenumber": data['afas_employee_id'],
        "Startdate": data['start_date'],
        "Enddate": data['end_date'],
        "PowerOfAttorney": data['power_of_attorney_code'],
        "CompanyCode": data['company_code'],
        "LimitOfAmount": data['value_limit'],
        "Currency": data['currency']
    }
    # Merge the overload (free) fields into the request body.
    # (Fixed: plain if-statement instead of a conditional expression used only for its side effect.)
    if overload_fields is not None:
        base_body.update(overload_fields)
    response = self.base_class.post_data(uri='PowersAttorneyPost/*', data=base_body, return_key=None)
    return response
def post_absence(self, data: dict):
    """
    Post a single-day absence record to SAP.
    Required keys in ``data``: employee_id, date_of_absence, type_of_hours_code, hours.
    Example of the resulting request body:
    {
        "Afasemployeenumber" : "70913119",
        "Employeenumber" : "00000000",
        "Startdate" : "2022-01-01",
        "Enddate" : "2022-01-01",
        "AbsenceType" : "0200",
        "AbsenceHours" : "11.00"
    }
    :param data: fields for the absence record; validated against required_fields below
    :return: response of the POST request (status code and optional error message)
    """
    allowed_fields = []
    required_fields = ['employee_id', 'date_of_absence', 'type_of_hours_code', 'hours']
    # Raises when required fields are missing or unknown fields are supplied
    self.__check_fields(data=data, required_fields=required_fields, allowed_fields=allowed_fields)
    base_body = {
        "Afasemployeenumber": data["employee_id"],
        # Fixed placeholder: the SAP employee number is never sent for absences
        "Employeenumber": "00000000",
        # Single-day absence: start and end are the same date
        "Startdate": data["date_of_absence"],
        "Enddate": data["date_of_absence"],
        "AbsenceType": data["type_of_hours_code"],
        "AbsenceHours": data["hours"]
    }
    response = self.base_class.post_data(uri='AbsencePost/*', data=base_body)
    return response | /salure_helpers_sap-1.5.4.tar.gz/salure_helpers_sap-1.5.4/salure_helpers/sap/post_endpoints.py | 0.744099 | 0.36441 | post_endpoints.py | pypi |
from salure_helpers.sap.base_functions import BaseFunctions
import requests
class DelimitEndpoints:
    """
    Wrapper around the SAP '*Del' endpoints. Every method builds a comma-separated
    key filter that identifies exactly one record and sends it to the matching
    delete/delimit endpoint via BaseFunctions.delete_data.
    """

    def __init__(self, label: str, data_dir: str, certificate_file: str = None, key_file: str = None, debug: bool = False):
        # BaseFunctions takes care of authentication and performs the actual HTTP requests
        self.base_class = BaseFunctions(label=label, data_dir=data_dir, certificate_file=certificate_file, key_file=key_file, debug=debug)
        self.data_dir = data_dir
        self.debug = debug

    def delimit_master_action(self, employee_id, start_date, end_date, action_type, reason_for_action):
        """
        Delimit personal data (master action) in SAP
        :param employee_id: the AFAS ID of the employee you want to delimit
        :param start_date: the start date of the record you want to delimit (intentionally not used in the filter, see inline comment)
        :param end_date: the end date at which the record will be delimited
        :param action_type: the SAP action type of the record
        :param reason_for_action: the SAP reason for the action
        :return: the response object; raises requests.HTTPError on a non-2xx status
        """
        data_filter = f"Afasemployeenumber='{employee_id}',Startdate='{end_date}',Enddate='9999-12-31',Actiontype='{action_type}',Reasonforaction='{reason_for_action}'"  # Masteraction delimit is picking the enddate as the startdate for the termination, no need to use the original startdate of the record
        response = self.base_class.delete_data(uri='MasterActionDel', filter=data_filter)
        response.raise_for_status()
        return response

    def delimit_org_unit(self, org_unit, start_date, end_date):
        """
        Delimit organisational units
        :param org_unit: the ID of the organisational unit you want to delimit
        :param start_date: the start date of the record you want to delimit
        :param end_date: the end date at which the record will be delimited
        :return: the response object; raises requests.HTTPError on a non-2xx status
        """
        data_filter = f"OrgUnitID='{org_unit}',Startdate='{start_date}',Enddate='{end_date}'"
        response = self.base_class.delete_data(uri='OrgUnitDel', filter=data_filter)
        response.raise_for_status()
        return response

    def delimit_position(self, position_id, start_date, end_date):
        """
        Delimit positions in SAP
        :param position_id: the ID of the position you want to delimit
        :param start_date: the start date of the record you want to delimit
        :param end_date: the end date at which the record will be delimited
        :return: the response object; raises requests.HTTPError on a non-2xx status
        """
        data_filter = f"PositionID='{position_id}',Startdate='{start_date}',Enddate='{end_date}'"
        response = self.base_class.delete_data(uri="PositionDel", filter=data_filter)
        response.raise_for_status()
        return response

    def delete_workcenter(self, position_id, start_date, end_date, workcenter):
        """
        Delete a workcenter assignment from a position in SAP
        :param position_id: the ID of the position the workcenter is assigned to
        :param start_date: the start date of the record you want to delimit
        :param end_date: the end date at which the record will be delimited
        :param workcenter: the workcenter you want to delimit
        :return: the response object; raises requests.HTTPError on a non-2xx status
        """
        data_filter = f"PositionID='{position_id}',Startdate='{start_date}',Enddate='{end_date}',WorkcenterID='{workcenter}'"
        response = self.base_class.delete_data(uri="WorkcenterDel", filter=data_filter)
        response.raise_for_status()
        return response

    def delete_matrix_manager(self, position_id, start_date, end_date, matrix_manager_position_id):
        """
        Delete a matrix manager assignment from a position in SAP
        :param position_id: the ID of the position the matrix manager is assigned to
        :param start_date: the start date of the record you want to delimit
        :param end_date: the end date at which the record will be delimited
        :param matrix_manager_position_id: the matrix manager position you want to delimit
        :return: the response object; raises requests.HTTPError on a non-2xx status
        """
        data_filter = f"PositionID='{position_id}',Startdate='{start_date}',Enddate='{end_date}',ManagePositionID='{matrix_manager_position_id}'"
        response = self.base_class.delete_data(uri="MatrixManagerDel", filter=data_filter)
        response.raise_for_status()
        return response

    def delimit_contract(self, employee_id, start_date, end_date):
        """
        Delimit a contract element in SAP
        :param employee_id: the AFAS ID of the employee whose contract you want to delimit
        :param start_date: the start date of the record you want to delimit
        :param end_date: the end date at which the record will be delimited
        :return: the response object; raises requests.HTTPError on a non-2xx status
        """
        data_filter = f"Afasemployeenumber='{employee_id}',Startdate='{start_date}',Enddate='{end_date}'"
        response = self.base_class.delete_data(uri="ContractElementDel", filter=data_filter)
        response.raise_for_status()
        return response

    def delimit_additional_contract_element(self, employee_id, start_date, end_date):
        """
        Delimit an additional contract element in SAP
        :param employee_id: the AFAS ID of the employee whose record you want to delimit
        :param start_date: the start date of the record you want to delimit
        :param end_date: the end date at which the record will be delimited
        :return: the response object; raises requests.HTTPError on a non-2xx status
        """
        data_filter = f"Afasemployeenumber='{employee_id}',Startdate='{start_date}',Enddate='{end_date}'"
        response = self.base_class.delete_data(uri="AdditionalContractElementDel", filter=data_filter)
        response.raise_for_status()
        return response

    def delimit_basic_pay(self, employee_id, start_date, end_date):
        """
        Delimit the Basic Pay in SAP
        :param employee_id: the AFAS Employee ID you want to delimit
        :param start_date: the start date of the record you want to delimit
        :param end_date: the end date at which the record will be delimited
        :return: the response object; raises requests.HTTPError on a non-2xx status
        """
        data_filter = f"Afasemployeenumber='{employee_id}',Startdate='{start_date}',Enddate='{end_date}'"
        response = self.base_class.delete_data(uri="BasicPayDel", filter=data_filter)
        response.raise_for_status()
        return response

    def delimit_power_of_attorney(self, employee_id, start_date, end_date, power_of_attorney_code, company_code):
        """
        Delimit power of attorney in SAP
        :param employee_id: the AFAS ID of the employee in case
        :param start_date: the start date of the record you want to delimit
        :param end_date: the end date at which the record will be delimited
        :param power_of_attorney_code: the code of the power of attorney you want to delimit
        :param company_code: the company code of the power of attorney you want to delimit
        :return: the response object; raises requests.HTTPError on a non-2xx status
        """
        data_filter = f"Afasemployeenumber='{employee_id}',Startdate='{start_date}',Enddate='{end_date}',PowerOfAttorney='{power_of_attorney_code}',CompanyCode='{company_code}'"
        response = self.base_class.delete_data(uri="PowersAttorneyDel", filter=data_filter)
        response.raise_for_status()
        return response

    def delimit_absence(self, employee_id, start_date, end_date):
        """
        Delimit absence record in SAP
        :param employee_id: the AFAS ID of the employee in case
        :param start_date: the start date of the record you want to delimit
        :param end_date: the end date at which the record will be delimited
        :return: the response object; raises requests.HTTPError on a non-2xx status
        """
        data_filter = f"Afasemployeenumber='{employee_id}',Startdate='{start_date}',Enddate='{end_date}'"
        response = self.base_class.delete_data(uri="AbsenceDel", filter=data_filter)
        response.raise_for_status()
        return response | /salure_helpers_sap-1.5.4.tar.gz/salure_helpers_sap-1.5.4/salure_helpers/sap/delimit_endpoints.py | 0.6488 | 0.270152 | delimit_endpoints.py | pypi |
import math
from typing import Union, Optional, List
import pandas as pd
import pymysql
from datetime import datetime
from salure_helpers.salureconnect import SalureConnect
class SQLServer(SalureConnect):
    """
    Helper for running queries against a database whose credentials are stored in SalureConnect.
    With ``return_queries=True`` the class only *generates* the SQL strings (query generator
    mode for the SalureConnect Agent); otherwise the queries are executed directly.
    NOTE(review): connections are opened with pymysql (MySQL wire protocol) while the credential
    type ('mssql') and default port (1433) suggest SQL Server -- confirm the target endpoint
    actually accepts this protocol before relying on execution mode.
    """

    def __init__(self, label: Union[str, List] = None, return_queries: bool = False, debug=False):
        # This is built in so you can use this class as a query generator for the SalureConnect Agent
        super().__init__()
        self.debug = debug
        self.return_queries = return_queries
        if self.return_queries:
            print("Running in query generator mode")
        else:
            credentials = self.get_system_credential(system='mssql', label=label)
            self.host = credentials['host']
            self.user = credentials['user']
            self.password = credentials['password']
            self.database = credentials['database']
            self.port = 1433 if credentials['port'] is None else credentials['port']

    def _connect(self):
        """Open a new connection with the stored credentials (one connection per query call)."""
        return pymysql.connect(host=self.host, user=self.user, password=self.password, database=self.database, port=self.port)

    def raw_query(self, query, insert=False) -> Optional[Union[List, str]]:
        """
        Execute a raw SQL statement.
        :param query: the SQL statement to execute
        :param insert: when True, commit the transaction and return None; otherwise fetch and return rows
        :return: the query string in generator mode, otherwise the fetched rows (or None for inserts)
        """
        if self.debug:
            print(query)
        if self.return_queries:
            return query
        connection = self._connect()
        try:  # finally guarantees the connection is closed, also when the query raises
            cursor = connection.cursor()
            cursor.execute(query)
            if insert:
                connection.commit()
                return None
            return list(cursor.fetchall())
        finally:
            connection.close()

    def update(self, table: str, columns: List, values: List, filter: str = None) -> Optional[str]:
        """
        Build and execute an UPDATE statement.
        :param table: table name
        :param columns: column names to update
        :param values: values, positionally matching ``columns``; None becomes DEFAULT and
                       datetimes are formatted as 'YYYY-MM-DD HH:MM:SS'
        :param filter: optional WHERE clause (e.g. "WHERE id = 1"), appended verbatim
        :return: the query string in generator mode, otherwise a summary of affected rows
        """
        def __map_strings(item):
            # None maps to DEFAULT so the column falls back to its schema default.
            # (Fixed: previously None was stringified first and a blanket str.replace turned
            # *any* occurrence of 'None' -- also inside legitimate string values -- into DEFAULT.)
            if item is None:
                return 'DEFAULT'
            if isinstance(item, str):
                return "'" + item + "'"
            if isinstance(item, datetime):
                return "'" + item.strftime("%Y-%m-%d %H:%M:%S") + "'"
            return str(item)

        update_values = ",".join(f"{column} = {__map_strings(value)}" for column, value in zip(columns, values))
        # Fixed: an omitted filter is rendered as an empty string instead of the literal 'None'
        query = f"UPDATE {table} SET {update_values} {filter if filter is not None else ''};"
        if self.debug:
            print(query)
        if self.return_queries:
            return query
        connection = self._connect()
        try:
            cursor = connection.cursor()
            resp = cursor.execute(query)
            connection.commit()
            return f"Updated {resp} rows in {table}"
        finally:
            connection.close()

    def select(self, table: str, columns: List, filter: str = None) -> Union[List, str]:
        """
        Build and execute a SELECT statement.
        :param table: table name
        :param columns: column names to select
        :param filter: optional WHERE/ORDER BY clause, appended verbatim
        :return: the query string in generator mode, otherwise the fetched rows as a list
        """
        query = f"SELECT {','.join(columns)} FROM {table} {filter if filter is not None else ''}"
        if self.debug:
            print(query)
        if self.return_queries:
            return query
        connection = self._connect()
        try:
            cursor = connection.cursor()
            cursor.arraysize = 10000  # fetch in large batches to speed up big result sets
            cursor.execute(query)
            return list(cursor.fetchall())
        finally:
            connection.close()

    def insert(self, table: str, df: pd.DataFrame, columns: List = None) -> Union[List, str]:
        """
        Build and execute INSERT statements for a dataframe, in batches of 1000 rows
        (SQL Server allows at most 1000 rows per INSERT).
        :param table: table name
        :param df: dataframe whose rows are inserted
        :param columns: optional explicit column list (or pre-joined string); defaults to the dataframe columns
        :return: the list of query strings in generator mode, otherwise a summary of inserted rows
        """
        # Resolve the column clause once, outside the batch loop.
        # (Fixed: a caller-supplied list is now joined into a clause instead of being
        # rendered as a Python list repr inside the statement.)
        if columns is None:
            column_clause = ', '.join(str(column) for column in df.columns)
        elif isinstance(columns, str):
            column_clause = columns
        else:
            column_clause = ', '.join(str(column) for column in columns)
        queries = []
        for i in range(math.ceil(len(df.index) / 1000)):
            # Split df in batches because you can insert a maximum of 1000 rows in SQL server at once
            df_batch = df[i * 1000: (i + 1) * 1000]
            values = ', '.join(str(row[1:]) for row in df_batch.itertuples())
            # Textual clean-up of the tuple reprs. NOTE(review): these replaces also hit
            # substrings like 'None' inside legitimate string values -- kept as-is because
            # downstream consumers may rely on the existing behaviour; confirm before changing.
            values = values.replace('None', 'DEFAULT')
            values = values.replace('NaT', 'NULL')
            values = values.replace('nan', 'NULL')
            values = values.replace("'NULL'", "NULL")
            values = values.replace('"', "'")
            values = values.replace("\\\\'", "''")
            queries.append(f"""INSERT INTO {table} ({column_clause}) VALUES {values}""")
        if self.debug:
            print(queries)
        if self.return_queries:
            return queries
        connection = self._connect()
        try:
            cursor = connection.cursor()
            resp = 0
            for query in queries:
                resp += cursor.execute(query)
            connection.commit()
            return f"Inserted {resp} rows in {table}"
        finally:
            connection.close()
from pandas.io.json import json_normalize
from time import mktime
from salure_helpers.salureconnect import SalureConnect
from typing import List, Union
import requests
import pandas as pd
import numpy as np
import datetime
import json
import time
import sys
class Zermelo(SalureConnect):
def __init__(self, label: Union[str, List], storage_location, initial_zermelo_extract=False, extract_cancelled_appointments=True):
"""
Extracts data from source based on the entered parameters
For documentation see: https://wartburg.zportal.nl/static/swagger/ & https://zermelo.atlassian.net/wiki/display/DEV/API+Entities
:param storage_location: indicates the location where the extracted data file is saved
:param initial_zermelo_extract: store the extract as a delta file (true) or not (false)
:param extract_cancelled_appointments: doesn't get the cancelled appointments by default. Can be changed to an empty string to get the cancelled appointments
"""
super().__init__()
credentials = self.get_system_credential(system='zermelo', label=label)
self.access_token = credentials['token']
self.url = f"https://{credentials['customer']}.zportal.nl/api/v3/"
self.storage_location = storage_location
self.initial_zermelo_extract = initial_zermelo_extract
if extract_cancelled_appointments:
self.cancelled_appointments = ''
else:
self.cancelled_appointments = '&cancelled=false'
def run_all_extracts(self):
# The following endpoints are delivering such huge amounts of data, that these one should be splitted in seperate schoolyears
start_of_data = datetime.date(year=datetime.datetime.today().year, month=8, day=1).timetuple()
end_of_data = datetime.date(year=datetime.datetime.today().year + 2, month=8, day=1).timetuple()
if self.initial_zermelo_extract:
for i in range(1, 7):
start_of_data = datetime.date(year=datetime.datetime.today().year - i, month=8, day=1).timetuple()
end_of_data = datetime.date(year=datetime.datetime.today().year - i + 1, month=7, day=31).timetuple()
# self.get_zermelo_substituded_lessons(endpoint='substitutedlessons', fields=['contract', 'employee', 'appointment', 'start', 'end', 'changeDescription', 'appointmentInstance'],
# startdate=start_of_data, enddate=end_of_data)
self.get_zermelo_appointments(endpoint='appointments', fields=['id', 'start', 'end', 'type', 'remark', 'valid', 'cancelled', 'modified',
'moved', 'changeDescription', 'branch', 'branchOfSchool', 'created', 'lastModified',
'hidden', 'appointmentInstance', 'new', 'teachers', 'students', 'subjects', 'groups',
'locations', 'locationsOfBranch', 'groupsInDepartments'],
startdate=start_of_data, enddate=end_of_data)
elif datetime.datetime.today().month <= 7:
start_of_data = datetime.date(year=datetime.datetime.today().year - 1, month=8, day=1).timetuple()
end_of_data = datetime.date(year=datetime.datetime.today().year + 1, month=8, day=1).timetuple()
self.get_zermelo_appointments(endpoint='appointments', fields=['id', 'start', 'end', 'type', 'remark', 'valid', 'cancelled', 'modified',
'moved', 'changeDescription', 'branch', 'branchOfSchool', 'created', 'lastModified',
'hidden', 'appointmentInstance', 'new', 'teachers', 'students', 'subjects', 'groups',
'locations', 'locationsOfBranch', 'groupsInDepartments'],
startdate=start_of_data, enddate=end_of_data)
else:
self.get_zermelo_appointments(endpoint='appointments', fields=['id', 'start', 'end', 'type', 'remark', 'valid', 'cancelled', 'modified',
'moved', 'changeDescription', 'branch', 'branchOfSchool', 'created', 'lastModified',
'hidden', 'appointmentInstance', 'new', 'teachers', 'students', 'subjects', 'groups',
'locations', 'locationsOfBranch', 'groupsInDepartments'],
startdate=start_of_data, enddate=end_of_data)
self.get_zermelo(endpoint='branches', fields=['code', 'name'])
self.get_zermelo(endpoint='branchesofschools', fields=['id', 'schoolInSchoolYear', 'branch', 'name'])
self.get_zermelo(endpoint='choosableindepartments', fields=['id', 'subject', 'departmentOfBranch', 'departmentOfBranchCode', 'sectionOfBranch', 'clockHoursPerLesson', 'teachingLevelManually',
'teachingLevel', 'subjectType', 'subjectCode', 'subjectName', 'scheduleCode', 'subjectScheduleCode', 'lessonDemand', 'lessonHoursInClassPeriods'],
nested=True, nested_fields=['lessonHoursInClassPeriods'])
self.get_zermelo(endpoint='classperiods', fields=['id', 'name', 'schoolInSchoolYear', 'weeks'], nested=True, nested_fields=['weeks'])
self.get_zermelo(endpoint='contracts', fields=['id', 'start', 'end', 'employee', 'defaultFunctionCategory', 'teacherTeam', 'clockHoursGeneralTasks', 'clockHoursGeneralTasksManually',
'clockHoursTasks', 'clockHoursProfessionalDevelopmentManually', 'clockHoursProfessionalDevelopment', 'clockHoursNet', 'lessonsMax', 'type',
'yearFraction', 'fteYearLeave', 'ftePermanent', 'fteTemporary', 'fteNet', 'clockHoursGross', 'clockHoursBalance', 'clockHoursLessonsMax',
'lessonReducingTasks', 'taskSpace', 'taskBalance', 'lessonSpace', 'mainBranchOfSchool', 'school', 'schoolName', 'schoolYear', 'firstName',
'lastName', 'prefix', 'clockHoursLessons'])
self.get_zermelo(endpoint='departmentsofbranches', fields=['id', 'code', 'yearOfEducation', 'branchOfSchool', 'clockHoursPerLesson', 'schoolInSchoolYearId', 'schoolInSchoolYearName', 'studentCount', 'prognosticStudentCount'])
self.get_zermelo(endpoint='employees', fields=['userCode', 'commencementTeaching', 'commencementSchool', 'prefix', 'gender', 'dateOfBirth', 'firstName', 'lastName', 'street', 'houseNumber', 'postalCode', 'city'])
self.get_zermelo(endpoint='groups', fields=['id', 'code'])
self.get_zermelo(endpoint='groupindepartments', fields=['id', 'departmentOfBranch', 'name', 'isMainGroup', 'isMentorGroup', 'extendedName'])
self.get_zermelo(endpoint='holidays', fields=['id', 'schoolInSchoolYear', 'name', 'start', 'end'])
self.get_zermelo(endpoint='jobs', fields=['id', 'contract', 'functionCategory', 'employmentType', 'start', 'end', 'fteReal', 'fteManually', 'fte', 'type', 'employee', 'clockHoursGross'])
self.get_zermelo(endpoint='jobextensions', fields=['id', 'contract', 'start', 'end', 'fteReal', 'lessonsAndTasks', 'total', 'employee', 'fte', 'generalTasks', 'professionalDevelopment', 'personalBudget'])
self.get_zermelo(endpoint='leaves', fields=['id', 'contract', 'leaveType', 'leaveTypeName', 'start', 'end', 'total', 'leaveApproved', 'employee', 'fteReal'])
self.get_zermelo(endpoint='leavetypes', fields=['id', 'name', 'fixed', 'affectsPersonalBudget'])
self.get_zermelo(endpoint='locations', fields=['code'])
self.get_zermelo(endpoint='locationofbranches', fields=['id', 'name', 'parentteachernightCapacity', 'courseCapacity', 'branchOfSchool'])
self.get_zermelo(endpoint='plannedlessons', fields=['id', 'clockHoursPerLesson', 'clockHoursPerLessonManually', 'plannedGroups', 'lessonDemand', 'branchOfSchool', 'departmentOfBranches',
'lessonHoursInClassPeriods', 'subjects', 'sectionOfBranches', 'maxTeachingLevel', 'regularTeachingAssignments',
'prognosticStudentsPerTeacherCount', 'expectedTeacherCount', 'privateComment', 'publicComment'],
nested=True, nested_fields=['plannedGroups', 'departmentOfBranches', 'subjects', 'sectionOfBranches', 'regularTeachingAssignments', 'lessonHoursInClassPeriods'])
self.get_zermelo(endpoint='plannedgroups', fields=['id', 'choosableInDepartment', 'groupInDepartment', 'teachingLevel', 'subjectCode', 'groupInDepartmentName',
'groupInDepartmentIsMainGroup', 'groupInDepartmentIsMentorGroup', 'groupInDepartmentExtendedName', 'name', 'rank'])
self.get_zermelo(endpoint='schools', fields=['id', 'name', 'brin'])
self.get_zermelo(endpoint='schoolsinschoolyears', fields=['id', 'school', 'year', 'project', 'archived', 'projectName', 'schoolName', 'name'])
self.get_zermelo(endpoint='sectionassignments', fields=['contract', 'id', 'lessonHoursFirstDegree', 'lessonHoursSecondDegree', 'sectionOfBranch'])
self.get_zermelo_filtered(endpoint='selectedsubjects',
fields=['id', 'subjectSelection', 'choosableInDepartment', 'alternativeChoosableInDepartment', 'manualLessonInvolvement',
'exemption', 'studentInDepartment', 'subjectCode', 'subject', 'segmentCode', 'lessonInvolvement'],
startdate=start_of_data,
enddate=end_of_data)
self.get_zermelo(endpoint='sections', fields=['id', 'abbreviation', 'name', 'sectionOfBranches'], nested=True, nested_fields=['sectionOfBranches'])
self.get_zermelo(endpoint='students', fields=['dateOfBirth', 'email', 'street', 'houseNumber', 'postalCode', 'city', 'lastName', 'prefix',
'firstName', 'lwoo', 'userCode', 'studentInDepartments'], nested=True, nested_fields=['studentInDepartments'])
self.get_zermelo(endpoint='studentsindepartments', fields=['id', 'student', 'departmentOfBranch', 'groupInDepartments', 'mainGroup'])
self.get_zermelo(endpoint='subjectselections', fields=['id', 'selectedSubjects', 'studentCode', 'departmentOfBranch'])
self.get_zermelo(endpoint='subjectselectionsubjects', fields=['id', 'code', 'name', 'scheduleCode'])
self.get_zermelo(endpoint='taskassignments', fields=['branchOfSchool', 'contract', 'employee', 'contract', 'hours', 'hoursReplacement', 'taskGroup', 'taskInBranchOfSchool',
'type', 'start', 'end'])
self.get_zermelo(endpoint='tasks', fields=['abbreviation', 'id', 'name', 'taskGroup', 'taskGroupAbbreviation'])
self.get_zermelo(endpoint='taskgroups', fields=['abbreviation', 'description', 'id', 'name'])
self.get_zermelo(endpoint='tasksinbranchofschool', fields=['branchOfSchool', 'clockHoursAssigned', 'clockHoursBalance', 'id', 'maxHours', 'task', 'taskAbbreviation'])
self.get_zermelo(endpoint='teacherteams', fields=['id', 'name', 'branchOfSchool', 'departmentOfBranches'], nested=True, nested_fields=['departmentOfBranches'])
self.get_zermelo(endpoint='teachingassignments', fields=['id', 'contract', 'plannedLesson', 'type', 'regular', 'lessonHoursInClassPeriodsManually', 'startWeek', 'endWeek',
'employee', 'regularContract', 'teachingQualificationStatus', 'lessonHoursNet', 'clockHoursPerLesson', 'clockHoursTotal',
'sectionOfBranches', 'publicComment', 'privateComment', 'clockHoursAlgorithm', 'replacements',
'lessonHoursInClassPeriods', 'plannedGroups'],
nested=True, nested_fields=['lessonHoursInClassPeriods', 'plannedGroups', 'sectionOfBranches', 'replacements'])
self.get_zermelo(endpoint='teachingqualifications', fields=['id', 'employee', 'choosable', 'startWeek', 'endWeek', 'diploma', 'teachingLevel', 'choosableAbbreviation', 'status', 'name'])
self.get_zermelo(endpoint='workforceparameters', fields=['defaultclockhoursperlesson', 'id', 'schoolInSchoolYear'])
def get_zermelo(self, endpoint, fields, nested=False, nested_fields=[]):
"""
Database in Zermelo is divided in different endpoints which consist of fields. Some fields are nested, which
means that some data lines have a subdivision.
:param endpoint: name of the endpoint. Not case-sensitive
:param fields: make a selection of the desired fields. Selection of the field(s) is case-sensitive
:param nested: field is nested or not
:param nested_fields: select nested fields
:return: returns error when extract didn't succeed
"""
try:
print('{0} - Started with {1}'.format(time.strftime('%H:%M:%S'), endpoint))
url_fields = ','.join(fields)
url = '{0}{1}?access_token={2}&fields={3}'.format(self.url, endpoint, self.access_token, url_fields)
if nested:
# Get the response without any transformation
response = requests.get(url).json()['response']['data']
# From all the fields, hold only the meta_fields (the not nested fields)
meta_fields = fields.copy()
for nested_field in nested_fields:
meta_fields.remove(nested_field)
# From the initial response, create a dataframe with only the meta_fields
df = pd.DataFrame(response)
df = df[meta_fields]
# Set the columns in df as the same type as in the original df. Sometimes, an empty field will change the column type in df_temp
# to object while the dtype in the original df is int or float. This will give an error when merging
existing_field_types = dict(df.dtypes)
for column in df:
if column in existing_field_types:
existing_dtype = existing_field_types[column]
if existing_dtype == 'int64' or existing_dtype == 'float64':
df[column] = df[column].fillna(0)
df[column] = df[column].astype(existing_dtype)
# Loop through the nested_fields, create a dataframe for each nested field and join the result to the initial dataframe
for nested_field in nested_fields:
# If the nested_field hold a key, value pair, then the record_prefix is usable. Only a value give a TypeError. Catch this error and rename the column
try:
df_temp = pd.io.json.json_normalize(data=response, meta=meta_fields, record_path=[nested_field], record_prefix='{}_'.format(nested_field))
except TypeError:
df_temp = pd.io.json.json_normalize(data=response, meta=meta_fields, record_path=[nested_field])
df_temp.rename(columns={0: nested_field}, inplace=True)
# Set the columns in df_temp as the same type as in the original df. Sometimes, an empty field will change the column type in df_temp
# to object while the dtype in the original df is int or float. This will give an error when merging
existing_field_types = dict(df.dtypes)
for column in df_temp:
if column in existing_field_types:
existing_dtype = existing_field_types[column]
if existing_dtype == 'int64' or existing_dtype == 'float64':
df_temp[column] = df_temp[column].fillna(0)
df_temp[column] = df_temp[column].astype(existing_dtype)
# Merge the initial dataframe and the new one
df = pd.merge(df, df_temp, how='left', on=meta_fields)
data = df
else:
init_response = json.loads(requests.get(url).content)
status = init_response['response']['status']
if status == 200:
data = pd.DataFrame(init_response['response']['data'])
# Check each column if the column only holds integers. If yes, and the type is a Float, set type to float. Otherwise, this gives problems in QLik Sense (2 becomes 2.0)
for column in data.columns:
try:
if data.loc[:, column].dtype == np.float64 or data.loc[:, column].dtype == np.int64:
data.loc[:, column].fillna(0, inplace=True)
else:
data.loc[:, column].fillna('', inplace=True)
column_name = 'check_{}'.format(column)
data.loc[:, column_name] = data.apply(lambda x: 'int64' if x[column].is_integer() else 'float', axis=1)
if 'float' in data.loc[:, column_name].values:
pass
else:
data.loc[:, column] = data.loc[:, column].astype('int64')
del data[column_name]
except Exception as e:
continue
else:
data = init_response['response']['message']
print(data)
data.index.name = '{0}_id'.format(endpoint)
file = '{0}{1}.csv'.format(self.storage_location, endpoint)
data.to_csv(file, sep='|', decimal=',')
print('{0} - {1} saved'.format(time.strftime('%H:%M:%S'), endpoint))
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
error = str(e)[:400].replace('\'', '').replace('\"', '') + ' | Line: {}'.format(exc_tb.tb_lineno)
return error
def get_zermelo_substituded_lessons(self, endpoint, fields, startdate, enddate):
start = time.time()
fields = ','.join(fields)
# Loop through the data per 3 days (3600 seconds * 24 hours * 3 days) because the dataset is too big to receive in once. Start three years back
df = pd.DataFrame()
start_epoch = int(time.mktime(startdate))
last_epoch = int(time.mktime(enddate))
while start_epoch < last_epoch:
try:
if (start_epoch + (3600 * 24 * 7)) > last_epoch:
end_epoch = int(last_epoch)
else:
end_epoch = int(start_epoch + (3600 * 24 * 7))
url = '{0}{1}?access_token={2}&fields={3}&start={4}&end={5}'.format(self.url, endpoint, self.access_token, fields, start_epoch, end_epoch)
data = requests.get(url).json()['response']['data']
# checks if data is not empty list
if data:
df_new = pd.DataFrame(data)
df_new['changeDescription'] = df_new['changeDescription'].str.replace('\n', '')
df_new['changeDescription'] = df_new['changeDescription'].str.replace('\r', '')
df = pd.concat([df, df_new])
print('Substituded: Start: {}, End: {}, Length: {}'.format(start_epoch, end_epoch, len(df_new)))
start_epoch += (3600 * 24 * 7)
except Exception as e:
print('{} - Error at timestamp {}: {}'.format(time.strftime('%H:%M:%S'), start_epoch, e))
start_epoch += (3600 * 24 * 7)
# Store the total dataframe to a new csv file
df.drop_duplicates(inplace=True)
df.reset_index(drop=True, inplace=True)
df.index.name = '{0}_id'.format(endpoint)
file = '{}{}_{}.csv'.format(self.storage_location, 'substituded_lessons', datetime.datetime.fromtimestamp(mktime(startdate)).strftime('%Y-%m-%d'))
df.to_csv(file, sep='|', decimal=',')
print('Done in {} seconds'.format(time.time() - start))
def get_zermelo_appointments(self, endpoint, fields, startdate, enddate):
    """
    Fetch all appointments from the Zermelo API in weekly batches, split out the nested columns
    into separate lookup CSV files and store the flat remainder in one appointments CSV.

    :param endpoint: Zermelo API endpoint to query (also used as index name in the CSV)
    :param fields: list of field names to request from the API
    :param startdate: time.struct_time marking the first moment to fetch
    :param enddate: time.struct_time marking the last moment to fetch
    """
    start = time.time()
    fields = ','.join(fields)
    df = pd.DataFrame()
    start_epoch = int(time.mktime(startdate))
    last_epoch = int(time.mktime(enddate))
    week = 3600 * 24 * 7
    while start_epoch < last_epoch:
        try:
            # Clamp the batch end to the requested end date
            end_epoch = int(min(start_epoch + week, last_epoch))
            print(start_epoch, end_epoch)
            url = '{0}{1}?access_token={2}&fields={3}&start={4}&end={5}&includeHidden=True{6}&valid=True'.format(self.url, endpoint, self.access_token, fields, start_epoch, end_epoch, self.cancelled_appointments)
            data = requests.get(url).json()['response']['data']
            # checks if data is not empty list
            if data:
                df_new = pd.DataFrame(data)
                # Strip newlines so they cannot break the pipe-separated CSV layout
                df_new['remark'] = df_new['remark'].str.replace('\n', '')
                df_new['remark'] = df_new['remark'].str.replace('\r', '')
                df = pd.concat([df, df_new])
                print('Appointments: Start: {}, End: {}, Length: {}'.format(start_epoch, end_epoch, len(df_new)))
            # Add one week
            start_epoch += week
        except Exception as e:
            print('{} - Error at timestamp {}: {}'.format(time.strftime('%H:%M:%S'), start_epoch, e))
            start_epoch += week
    # During summer vacation, it can occur that no data call is executed. The df is empty in this case
    if len(df) > 0:
        # Reset branchOfSchool from Float to Int. Assign the cleaned column back instead of the
        # original chained .loc[...].fillna(inplace=True), which silently operates on a copy in
        # newer pandas versions.
        df['branchOfSchool'] = df['branchOfSchool'].fillna(0).astype('int64')
        df.reset_index(inplace=True, drop=True)
        # Subtract all the nested layers from the appointments and save to separate files
        self.appointments_create_lookup_table(df, 'students', 'userCode', startdate)
        self.appointments_create_lookup_table(df, 'teachers', 'userCode', startdate)
        self.appointments_create_lookup_table(df, 'subjects', 'scheduleCode', startdate)
        self.appointments_create_lookup_table(df, 'groups', 'code', startdate)
        self.appointments_create_lookup_table(df, 'locations', 'code', startdate)
        self.appointments_create_lookup_table(df, 'locationsOfBranch', 'id', startdate)
        self.appointments_create_lookup_table(df, 'groupsInDepartments', 'id', startdate)
        # Store the total dataframe to a new csv file
        df.drop(columns=['students', 'teachers', 'subjects', 'groups', 'locations', 'locationsOfBranch', 'groupsInDepartments'], inplace=True)
        df.index.name = '{0}_id'.format(endpoint)
        # time.mktime explicitly: a bare mktime() would raise a NameError here
        file = '{}{}_{}.csv'.format(self.storage_location, 'appointments', datetime.datetime.fromtimestamp(time.mktime(startdate)).strftime('%Y-%m-%d'))
        df.to_csv(file, sep='|', decimal=',')
    print('Done in {} seconds'.format(time.time() - start))
def appointments_create_lookup_table(self, df, col_name, link_id, startdate):
    """
    Flatten one nested (list-valued) column of the appointments dataframe into a separate lookup
    CSV that links appointment ids to the individual items of that column.

    :param df: appointments dataframe containing an 'id' column and the nested column
    :param col_name: name of the nested column to flatten
    :param link_id: column name under which each linked item is written in the output file
    :param startdate: time.struct_time used for the date suffix in the output file name
    """
    df = df[['id', col_name]]
    # Only keep rows with filled arrays
    df = df[df[col_name].apply(len) > 0]
    lookup_rows = []
    for _, row in df.iterrows():
        appointment_id = row['id']
        for item in row[col_name]:
            lookup_rows.append({'appointmentsId': appointment_id, link_id: item})
    df = pd.DataFrame(lookup_rows)
    # time.mktime explicitly: the original called a bare mktime(), which raises a NameError
    # unless the module separately does 'from time import mktime'
    file = '{0}{1}.csv'.format(self.storage_location, 'appointments_{}_{}'.format(col_name, datetime.datetime.fromtimestamp(time.mktime(startdate)).strftime('%Y-%m-%d')))
    df.index.name = 'appointments_{0}_id'.format(col_name)
    df.to_csv(file, sep='|', decimal=',')
def get_zermelo_filtered(self, endpoint, fields, startdate, enddate):
    """
    Fetch a generic Zermelo endpoint in weekly batches and store the combined result in one CSV file.

    :param endpoint: Zermelo API endpoint to query (also used for the CSV file name and index name)
    :param fields: list of field names to request from the API
    :param startdate: time.struct_time marking the first moment to fetch
    :param enddate: time.struct_time marking the last moment to fetch
    """
    start = time.time()
    fields = ','.join(fields)
    # Loop through the data in batches of one week (3600 seconds * 24 hours * 7 days)
    # because the dataset is too big to receive at once.
    df = pd.DataFrame()
    start_epoch = int(time.mktime(startdate))
    last_epoch = int(time.mktime(enddate))
    week = 3600 * 24 * 7
    while start_epoch < last_epoch:
        try:
            # Clamp the batch end to the requested end date
            end_epoch = int(min(start_epoch + week, last_epoch))
            url = '{0}{1}?access_token={2}&fields={3}&start={4}&end={5}'.format(self.url, endpoint, self.access_token, fields, start_epoch, end_epoch)
            data = requests.get(url).json()['response']['data']
            # checks if data is not empty list
            if data:
                df_new = pd.DataFrame(data)
                df = pd.concat([df, df_new])
                print(f'{endpoint}: Start: {start_epoch}, End: {end_epoch}, Length: {len(df_new)}')
            start_epoch += week
        except Exception as e:
            print('{} - Error at timestamp {}: {}'.format(time.strftime('%H:%M:%S'), start_epoch, e))
            # Bug fix: advance the window on error as well; the original stayed on the same
            # timestamp forever, causing an infinite loop on any persistent failure.
            start_epoch += week
    # Store the total dataframe to a new csv file
    df.drop_duplicates(inplace=True)
    df.reset_index(drop=True, inplace=True)
    df.index.name = '{0}_id'.format(endpoint)
    file = '{0}{1}.csv'.format(self.storage_location, endpoint)
    df.to_csv(file, sep='|', decimal=',')
    print('{0} - {1} saved'.format(time.strftime('%H:%M:%S'), endpoint))
    print('Done in {} seconds'.format(time.time() - start))
import zeep
from zeep import Client
from zeep.helpers import serialize_object
import json
class PlanningNl(object):
    def __init__(self, username, password):
        """
        This package is meant to get from or push data to planning.nl, a timeschedule tool. The requests are all made with SOAP.
        See for documentation of the SOAP API the following link: https://wiki.visibox.nl/display/VIS/Algemene+informatie
        :param username: The username for planning.nl
        :param password: The corresponding password
        """
        client = Client('https://api.visibox.nl/v1/authentication?wsdl')
        request_body = {'username': username, 'password': password}
        response = client.service.login(request_body)
        # Keep the session token; every subsequent call must send it in the 'authentication' element
        self.token = response['authenticationToken']

    def get_human_resources(self, employee_id=None):
        """
        Get all the employees with their basic information (like personal details).
        :param employee_id: An optional filter to receive data from only 1 employee
        :return: the data returned from the SOAP request
        """
        # Determine the client for the resources
        client = Client('https://api.visibox.nl/v1/resource?wsdl')
        # Build the matcher once and only add the 'number' filter when a single employee is
        # requested; this removes the duplicated request-body construction of the original.
        matcher = {'allowMultiple': 'true'}
        if employee_id is not None:
            matcher['number'] = employee_id
        request_body = {
            'authentication': {
                'token': self.token
            },
            'humanResource': {
                'resourceClass': 'PERSONNEL',
                'humanResourceMatcher': matcher
            }
        }
        response = client.service.getHumanResources(**request_body)['matcherResults']['matcherResult'][0]['entities']['entity']
        data = serialize_object(response)
        return data

    def get_departments(self):
        """
        Get all the departments from planning.nl. Only the departments, no employees etc.
        :return: the data returned from the SOAP request
        """
        client = Client('https://api.visibox.nl/v1/department?wsdl')
        request_body = {
            'authentication': {
                'token': self.token
            },
            'departmentMatcher': {
                'allowMultiple': 'true'
            }
        }
        response = client.service.getDepartments(**request_body)['matcherResults']['matcherResult'][0]['entities']['entity']
        data = serialize_object(response)
        return data

    def get_resource_sorts(self):
        """
        Get all the resource sorts which are used in the planning.nl environment. This could be the job descriptions for example
        :return: the data returned from the SOAP request
        """
        client = Client('https://api.visibox.nl/v1/resource?wsdl')
        request_body = {
            'authentication': {
                'token': self.token
            },
            'resourceSort': {
                'resourceClass': 'PERSONNEL',
                'resourceSortMatcher': {
                    'allowMultiple': 'true'
                }
            }
        }
        response = client.service.getResourceSorts(**request_body)['matcherResults']['matcherResult'][0]['entities']['entity']
        data = serialize_object(response)
        return data

    def get_resource_types(self):
        """
        Get all the resource types which are used in the planning.nl environment. This could be the departments for example
        :return: the data returned from the SOAP request
        """
        client = Client('https://api.visibox.nl/v1/resource?wsdl')
        request_body = {
            'authentication': {
                'token': self.token
            },
            'resourceType': {
                'resourceClass': 'PERSONNEL',
                'resourceTypeMatcher': {
                    'allowMultiple': 'true'
                }
            }
        }
        response = client.service.getResourceTypes(**request_body)['matcherResults']['matcherResult'][0]['entities']['entity']
        data = serialize_object(response)
        return data

    def get_human_departments(self):
        """
        Get the ID's of employees with their department ID's
        :return: the data returned from the SOAP request
        """
        client = Client('https://api.visibox.nl/v1/resource?wsdl')
        request_body = {
            'authentication': {
                'token': self.token
            },
            'resourceDepartment': {
                'resourceClass': 'PERSONNEL',
                'resourceDepartmentMatcher': {
                    'allowMultiple': 'true',
                    'humanResourceMatcher': {
                        'allowMultiple': 'true'
                    },
                    'departmentMatcher': {
                        'allowMultiple': 'true'
                    }
                }
            }
        }
        response = client.service.getResourceDepartments(**request_body)['matcherResults']['matcherResult'][0]['entities']['entity']
        data = serialize_object(response)
        return data

    def push_resource_sorts(self, resource_sort_number, resource_sort_description):
        """
        Push resource sorts to planning.nl
        :param resource_sort_number: The number of the resource. Watch out, this is not the ID of the resource sort. The ID is not needed
        :param resource_sort_description: The description of the resource sort
        :return: the data returned from the SOAP request
        """
        client = Client('https://api.visibox.nl/v1/resource?wsdl')
        request_body = {
            'authentication': {
                'token': self.token
            },
            'resourceSort': {
                'resourceClass': 'PERSONNEL',
                'resourceSortMatcher': {
                    'allowMultiple': 'true',
                    'number': resource_sort_number
                },
                'resourceSortValues': {
                    'number': resource_sort_number,
                    'description': resource_sort_description
                }
            }
        }
        response = client.service.pushResourceSorts(**request_body)
        data = serialize_object(response)
        return data

    def push_resource_types(self, resource_type_number, resource_type_description):
        """
        Push resource types to planning.nl
        :param resource_type_number: The number of the resource. Watch out, this is not the ID of the resource type. The ID is not needed
        :param resource_type_description: The description of the resource sort
        :return: the data returned from the SOAP request
        """
        client = Client('https://api.visibox.nl/v1/resource?wsdl')
        request_body = {
            'authentication': {
                'token': self.token
            },
            'resourceType': {
                'resourceClass': 'PERSONNEL',
                'resourceTypeMatcher': {
                    'allowMultiple': 'true',
                    'number': resource_type_number
                },
                'resourceTypeValues': {
                    'number': resource_type_number,
                    'description': resource_type_description
                }
            }
        }
        response = client.service.pushResourceTypes(**request_body)
        data = serialize_object(response)
        return data

    def push_human_recources(self, employee_id, job, department_number, department, first_name, last_name_prefix, last_name, phone_mobile, phone_fixed, street, zip_code, city, email, birth_date):
        """
        Create or update a single employee in planning.nl, matched on the employee number.
        Note: the method name contains a typo ('recources') but is kept for backward compatibility.
        :param employee_id: the employee number used for matching and as the 'number' value
        :param job: description of the resource type (job)
        :param department_number: number of the resource sort (department)
        :param department: description of the resource sort (department)
        :param first_name: first name of the employee
        :param last_name_prefix: prefix of the last name (tussenvoegsel)
        :param last_name: last name of the employee
        :param phone_mobile: mobile phone number
        :param phone_fixed: fixed phone number
        :param street: street name of the home address
        :param zip_code: postal code of the home address
        :param city: city of the home address
        :param email: e-mail address
        :param birth_date: date of birth
        :return: the data returned from the SOAP request
        """
        client = Client('https://api.visibox.nl/v1/resource?wsdl')
        request_body = {
            'authentication': {
                'token': self.token
            },
            'humanResource': {
                'resourceClass': 'PERSONNEL',
                'humanResourceMatcher': {
                    'allowMultiple': 'true',
                    'number': employee_id
                },
                'humanResourceValues': {
                    'resourceType': {
                        'allowMultiple': 'true',
                        'description': job
                    },
                    'resourceSort': {
                        'allowMultiple': 'true',
                        'number': department_number,
                        'description': department
                    },
                    'number': employee_id,
                    'firstname': first_name,
                    'lastnamePrefix': last_name_prefix,
                    'lastname': last_name,
                    'phoneMobile': phone_mobile,
                    'phoneFixed': phone_fixed,
                    'streetName': street,
                    'postalCode': zip_code,
                    'city': city,
                    'email': email,
                    'birthDate': birth_date,
                    'doNotShowAsProjectLeader': 'true',
                    'doNotShowAsPhaseLeader': 'true'
                }
            }
        }
        response = client.service.pushHumanResource(**request_body)
        data = serialize_object(response)
        return data

    def logout(self):
        """
        Destroy the session token on the server side.
        :return: the raw SOAP logout response
        """
        client = Client('https://api.visibox.nl/v1/authentication?wsdl')
        # Logout so that the token is destroyed. The logoutRequest key is mandatory but because there is no available value set a zeep.xsd.SkipValue
        request_body = {
            'authentication': {
                'token': self.token
            },
            'logoutRequest': zeep.xsd.SkipValue
        }
        response = client.service.logout(**request_body)
        return response
import math
import time
from typing import Union, Optional
import pandas as pd
import pymysql
from datetime import datetime
class SQLServer:
    """
    Thin per-query database helper: every call opens a fresh connection, runs the statement(s)
    and closes the connection again.

    NOTE(review): this class uses pymysql (a MySQL driver) while the class name and default
    port 1433 suggest Microsoft SQL Server — confirm the target actually speaks the MySQL protocol.
    """

    def __init__(self, host: str = None, user: str = None, password: str = None, database: str = None, port: int = 1433, debug=False):
        # Connection parameters are only stored here; no connection is opened yet
        self.host = host
        self.user = user
        self.password = password
        self.database = database
        self.port = port
        self.debug = debug

    def _connect(self):
        """Open a new database connection with the stored credentials."""
        return pymysql.connect(host=self.host, user=self.user, password=self.password, database=self.database, port=self.port)

    def raw_query(self, query, insert=False):
        """
        Execute a raw SQL statement.
        :param query: the SQL statement to execute
        :param insert: when True the transaction is committed and a timing message is returned,
                       otherwise all fetched rows are returned
        :return: the fetched rows, or a timing string for inserts
        """
        start = time.time()
        connection = self._connect()
        try:
            cursor = connection.cursor()
            cursor.execute(query)
            if insert:
                connection.commit()
                data = '{0} - Writing data took {1} seconds'.format(time.strftime('%H:%M:%S'), time.time() - start)
            else:
                data = cursor.fetchall()
        finally:
            # Always release the connection, also when execute() raises (the original leaked it)
            connection.close()
        return data

    def update(self, table: str, columns: list, values: list, filter: str = None, return_queries: bool = False) -> Optional[str]:
        """
        Build (and optionally execute) an UPDATE statement.
        :param table: table to update
        :param columns: column names to set
        :param values: values for the columns, positionally matching `columns`
        :param filter: optional WHERE clause (including the 'WHERE' keyword)
        :param return_queries: when True the statement is returned instead of executed
        :return: the query string when return_queries is True, otherwise None
        """
        def _quote(item):
            # Quote strings and datetimes for SQL; everything else is rendered as-is
            if isinstance(item, str):
                return "'" + str(item) + "'"
            elif isinstance(item, datetime):
                return "'" + item.strftime("%Y-%m-%d %H:%M:%S") + "'"
            else:
                return str(item)

        update_values = ','.join(f"{columns[index]} = {_quote(values[index])}" for index in range(len(columns)))
        # None values become column defaults. NOTE(review): this plain text replacement would
        # also mangle legitimate string values that contain the substring 'None'.
        update_values = update_values.replace('None', 'DEFAULT')
        # Bug fix: an omitted filter must not leak the literal text 'None' into the statement
        query = f"UPDATE {table} SET {update_values} {filter if filter is not None else ''};"
        if self.debug:
            print(query)
        if return_queries:
            return query
        connection = self._connect()
        try:
            cursor = connection.cursor()
            cursor.execute(query)
            connection.commit()
        finally:
            connection.close()

    def select(self, table: str, columns: list, filter: str = None, return_queries: bool = False) -> Union[list, str]:
        """
        Build (and optionally execute) a SELECT statement.
        :param table: table to read from
        :param columns: column names to select
        :param filter: optional clause appended after the table name (e.g. a WHERE clause)
        :param return_queries: when True the statement is returned instead of executed
        :return: the query string when return_queries is True, otherwise the fetched rows as a list
        """
        query = f"SELECT {','.join(columns)} FROM {table} {filter if filter is not None else ''}"
        if self.debug:
            print(query)
        if return_queries:
            return query
        connection = self._connect()
        try:
            cursor = connection.cursor()
            # Fetch in large batches to limit round trips
            cursor.arraysize = 10000
            cursor.execute(query)
            data = cursor.fetchall()
        finally:
            connection.close()
        return list(data)

    def insert(self, table: str, df: pd.DataFrame, columns: list = None, return_queries: bool = False) -> Union[list, str]:
        """
        Build (and optionally execute) INSERT statements for a dataframe.
        :param table: table to insert into
        :param df: dataframe whose rows are inserted
        :param columns: optional explicit column list; defaults to the dataframe's columns
        :param return_queries: when True the statements are returned instead of executed
        :return: the list of query strings when return_queries is True, otherwise None
        """
        queries = []
        # Split df in batches because you can insert a maximum of 1000 rows in sql server at once
        for i in range(math.ceil(len(df.index) / 1000)):
            df_batch = df[0 + i * 1000: 1000 + i * 1000]
            columns = columns if columns else ', '.join(str(column) for column in df_batch.columns)
            values = ', '.join(str(index[1:]) for index in df_batch.itertuples())
            # Translate Python null markers to SQL and normalise quoting
            values = values.replace('None', 'DEFAULT')
            values = values.replace('NaT', 'NULL')
            values = values.replace('nan', 'NULL')
            values = values.replace("'NULL'", "NULL")
            values = values.replace('"', "'")
            values = values.replace("\\\\'", "''")
            queries.append(f"""INSERT INTO {table} ({columns}) VALUES {values}""")
        if return_queries:
            return queries
        connection = self._connect()
        try:
            cursor = connection.cursor()
            for query in queries:
                cursor.execute(query)
            connection.commit()
        finally:
            connection.close()
import os
import mandrill
import codecs
import base64
class MailClient(object):
    @staticmethod
    def send_mail(api_token, email_to: list, email_from: str, name_from: str, subject: str, language='NL', content=None, attachment=None):
        """
        Send a mail with the salureconnect layout and using mandrill
        :param api_token: the mandrill API token
        :param email_to: a list of dicts with 'name' and 'mail' keys for each recipient
        :param email_from: the mailaddress from the sender. Should be enabled in mandrill
        :param name_from: the name from the sender. Will be printed at the bottom of the mail
        :param subject: the subject of the email
        :param language: determines the salutation and greeting text. 'NL' gives Dutch, anything else English
        :param attachment: the attachment of an email loaded as binary file (NOT the location of the file)
        :param content: the message of the email
        :return: per recipient, the mandrill send result
        """
        mandrill_client = mandrill.Mandrill(api_token)
        # Load the html template for e-mails; use a context manager so the file handle is
        # closed again (the original leaked it)
        html_file_location = '{}/templates/mail_salureconnect.html'.format(os.path.dirname(os.path.abspath(__file__)))
        with codecs.open(html_file_location, 'r') as html_file:
            html = html_file.read()
        if language == 'NL':
            salutation = 'Beste '
            greeting_text = 'Met vriendelijke groet'
        else:
            salutation = 'Dear '
            greeting_text = 'Kind regards'
        # Read and encode the attachment once, outside the recipient loop
        if attachment is not None:
            opened_attachment = attachment.read()
            encoded_attachment = base64.b64encode(opened_attachment).decode('utf-8')
        response = []
        for recipient in email_to:
            # Escape all braces in the template, then re-open only the known placeholders so
            # that .format() fills in exactly these fields
            new_html = html.replace('{', '{{'). \
                replace('}', '}}'). \
                replace('{{subject}}', '{subject}'). \
                replace('{{title}}', '{title}'). \
                replace('{{salutation}}', '{salutation}'). \
                replace('{{name}}', '{name}'). \
                replace('{{content}}', '{content}'). \
                replace('{{greeting}}', '{greeting}').format(subject=subject, title=subject, salutation=salutation, name=recipient['name'], content=content, greeting=greeting_text)
            mail = {
                'from_email': email_from,
                'from_name': name_from,
                'subject': subject,
                'html': new_html,
                'to': [{'email': recipient['mail'],
                        'name': recipient['name'],
                        'type': 'to'}]
            }
            # Only add the attachments element when there actually is an attachment
            if attachment is not None:
                mail['attachments'] = [{'content': encoded_attachment,
                                        'name': attachment.name.split('/')[-1]
                                        }]
            # Send the mail and return the result per mail address
            result = {
                'Send to': recipient,
                'result': mandrill_client.messages.send(mail, False, 'Main Pool')
            }
            response.append(result)
        return response
import datetime
import json
import time
import pandas as pd
import requests
from pandas.io.json import json_normalize
import base64
class Elastic:
    def __init__(self, host, port, user, password, client_user, client_password):
        """
        A package to create indexes, users, roles, getting data, etc.
        :param host: the url or local IP of the elasticsearch cluster. For example http://localhost
        :param port: In most cases 9200 but it can be different
        :param user: The Elasticsearch admin user
        :param password: the password of the user
        :param client_user: name of the client; used as prefix for indices, role and user
        :param client_password: password used when creating the client user
        """
        self.host = f'{host}:{port}'
        # Basic-auth token for the admin user, sent with every request
        encoded_bytes = base64.b64encode(f'{user}:{password}'.encode("utf-8"))
        self.elastic_token = str(encoded_bytes, "utf-8")
        self.headers = {
            'Authorization': f'Basic {self.elastic_token}',
            'Content-Type': 'application/json'
        }
        self.client_user = client_user
        self.client_password = client_password
        # Fail fast when the cluster is unreachable
        health = self.get_health()
        if health != 200:
            raise Exception('No connection could be established')

    def create_index_template(self, template_name, index_template):
        """
        Create a index template. A template can be used for the creating of indices. See for documentation https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html
        :param template_name: The name of the template
        :param index_template: The template with properties etc.
        :return: The response of the request to elasticsearch
        """
        # Check if the index template exists, if not, create it
        url = f'{self.host}/_template/{template_name}'
        response = requests.get(url, headers=self.headers)
        if response.status_code < 300:
            return f'Index template \'{template_name}\' already exists'
        body = json.dumps(index_template)
        response = requests.put(url=url, data=body, headers=self.headers)
        return response

    def create_index(self, index_name: str, update_role=False, corresponding_role_name=None):
        """
        Creates a new index in the elasticsearch instance. Documentation: https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html
        :param index_name: The name of the desired index
        :param update_role: True if you want to update the role of this customer so this new index is added to the roll
        :param corresponding_role_name: The role at which the index should be added so that users with that role can read and update the index
        :return: The response of the request to elasticsearch
        """
        url = f'{self.host}/{index_name}'
        response = requests.get(url=url, headers=self.headers)
        if 200 <= response.status_code < 300:
            return f'Index \'{index_name}\' already exists'
        response = requests.put(url=url, headers=self.headers)
        if update_role:
            self.create_or_update_role(corresponding_role_name)  # TODO Add indices as input to create_or_update_role()
        return response

    def create_or_update_role(self, role_name: str, index: list):
        """
        Creates or updates a role. All the indexes which start with the same constraint as the role_name, are added to the role
        :param role_name: The name of the desired role. Most often the username which also is used for the mysql database user (sc_customer)
        :param index: one or more index names in a list.
        :return: The response of the request to elasticsearch
        """
        url = f'{self.host}/_security/role/{role_name}'
        # Grant read and write access on the given indices to the role
        body = {
            'cluster': ['transport_client'],
            'indices': [
                {
                    'names': index,
                    'privileges': ['read', 'write', 'read_cross_cluster', 'view_index_metadata', 'index']
                }
            ],
            'metadata': {
                'version': 1
            }
        }
        body = json.dumps(body)
        response = requests.put(url=url, data=body, headers=self.headers)
        return response

    def create_user(self, user_name: str, password: str, user_description: str, roles: list):
        """
        Creates a user if it doesn't exist.
        :param user_name: The username. Most often the username which also is used for the mysql database user (sc_customer)
        :param password: Choose a safe password. At least 8 characters long
        :param user_description: A readable description. Often the customer name
        :param roles: Give the roles to which the user belongs in a list. Most often the same role_name as the user_name
        :return: The response of the request to elasticsearch
        """
        url = f'{self.host}/_security/user/{user_name}'
        body = {
            'password': f'{password}',
            'roles': roles,
            'full_name': f'{user_description}'
        }
        body = json.dumps(body)
        response = requests.put(url=url, data=body, headers=self.headers)
        return response

    def delete_index(self, index_name):
        """
        Deletes an existing index if it exists. Documentation: https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html
        :param index_name: The index you want to delete
        :return: The response of the request to elasticsearch
        """
        # Check if index exists
        url = f'{self.host}/{index_name}'
        response = requests.get(url=url, headers=self.headers)
        # Delete index only if it exists
        if response.status_code >= 400:
            return f'Index \'{index_name}\' does not exist'
        response = requests.delete(url=url, headers=self.headers)
        return response

    def get_all_docs_from_index(self, index):
        """
        Get all the documents from today's daily index for the given index prefix.
        Daily indices are expected to be named '<index>-YYYY-MM-DD'.
        :param index: the name (prefix) of the index
        :return: a dataframe with all documents, or None when no index exists for today
        """
        size = 10000
        # Get all indices with the given prefix; authenticate like every other call
        # (the original sent this request without auth headers)
        indices = requests.get(url=self.host + '/' + index + '*/_settings', headers=self.headers).json()
        index_list = {}
        for index_name in indices:
            # The last 10 characters of the index name hold the date as YYYY-MM-DD
            index_date = datetime.date(int(index_name[-10:-6]), int(index_name[-5:-3]), int(index_name[-2:]))
            index_list[str(index_date)] = index_name
        for key, value in sorted(index_list.items()):
            if key == str(time.strftime("%Y-%m-%d")):
                url = f'{self.host}/{value}/_search'
                # initial request, opening a scroll context of 10 minutes
                params = {"size": size, "scroll": "10m"}
                response = requests.get(url=url, params=params, headers=self.headers).json()
                scroll_id = response['_scroll_id']
                # NOTE(review): on Elasticsearch >= 7 'total' is an object ({'value': ...}),
                # not an int — confirm the cluster version this runs against
                total = response['hits']['total']
                response = json_normalize(response['hits']['hits'])
                response.drop(['_id', '_index', '_score', '_type'], axis=1, inplace=True)
                # Keep scrolling until a page comes back smaller than the requested size
                loop_boolean = True
                body = json.dumps({"scroll": "10m", "scroll_id": scroll_id})
                url = f'{self.host}/_search/scroll'
                while loop_boolean and total > size:
                    next_response = json_normalize(requests.post(url=url, data=body, headers=self.headers).json()["hits"]["hits"])
                    next_response.drop(['_id', '_index', '_score', '_type'], axis=1, inplace=True)
                    # Bug fix: the accumulated dataframe must be concatenated; the original
                    # concatenated the boolean loop flag instead of 'response'
                    response = pd.concat([response, next_response], ignore_index=True)
                    print(f'Received {len(next_response)} documents from index {value}')
                    if len(next_response) != size:
                        loop_boolean = False
                return response

    def get_health(self):
        """
        Check if a there is a connection with elasticsearch
        :return: the statuscode of the request
        """
        health = requests.get(url=f'{self.host}/_cat/health?', headers=self.headers).status_code
        return health

    def get_indices(self):
        """
        Get all the indices of the cluster.
        :return: the indices as parsed json
        """
        indices = requests.get(url=f'{self.host}/_cat/indices?format=json', headers=self.headers).json()
        return indices

    def log_line(self, timestamp: str, information: dict):
        """
        Write a line to the elasticsearch database
        :param timestamp: the date and time of logging
        :param information: the information to be inserted into the database, which should correspond with a template
        :return: the response of the post request
        """
        # Look for template with matching name
        url = f'{self.host}/_template/{self.client_user}?pretty'
        response = requests.get(url, headers=self.headers)
        if response.status_code >= 400:
            return f'Cannot find an index template for index \'{self.client_user}\' with the same name.'
        template = response.json()
        template_properties = template[self.client_user]['mappings']['properties']
        # Check if provided information matches the template: same number of keys...
        correct_size = (len(information) == len(template_properties))
        if not correct_size:
            return f'The input argument \'information\' should be a dictionary with {len(template_properties)} keys'
        # ...and every template key present in the provided information
        correct_keys = True
        items = []
        for key, value in template_properties.items():
            items.append(f'{key} ({value["type"]})')
            if correct_keys:
                correct_keys = key in information
        if not correct_keys:
            return f'The input argument \'information\' should be a dictionary with the following keys (type of ' \
                   f'value): {", ".join(items)}'
        # Creating the index of the current date if it does not exist yet
        created_at = timestamp
        index_name = f'{self.client_user}-{created_at[:10]}'
        url = f'{self.host}/{index_name}'
        response = requests.get(url=url, headers=self.headers)
        if response.status_code >= 400:
            self.create_index(index_name)
        # Create or update the role for the current client with all of its indices
        df = json_normalize(self.get_indices())
        df = df[df['index'].str.startswith(self.client_user)]
        client_indices = df['index'].tolist()
        self.create_or_update_role(self.client_user, client_indices)
        # Create user for the current client if it doesn't exist yet
        description = self.client_user
        self.create_user(self.client_user, self.client_password, description, [self.client_user])
        # Add new document
        url = f'{self.host}/{index_name}/_doc/'
        body = json.dumps(information)
        response = requests.post(url=url, data=body, headers=self.headers)
        return response
import os
import pandas as pd
import requests
import json
from typing import List, Union
class SalureConnect:
    def __init__(self, customer: str, api_token: str, staging: bool = False):
        """
        Client for the SalureConnect API.
        :param customer: customer identifier, sent in the 'salure-customer' header
        :param api_token: SalureToken used for authorization
        :param staging: when True the staging environment is used instead of production
        """
        self.customer = customer
        self.api_token = api_token
        self.url = 'https://staging.salureconnect.com/api/v1/' if staging else 'https://salureconnect.com/api/v1/'

    def __get_headers(self):
        # Headers required by every SalureConnect endpoint
        return {
            'Authorization': f'SalureToken {self.api_token}',
            'salure-customer': self.customer
        }

    def get_system_credential(self, system: str, label: Union[str, list], test_environment: bool = False) -> json:
        """
        This method retrieves authentication credentials from salureconnect.
        It returns the json data if the request does not return an error code
        :param system: specifies which token is used. (lowercase)
        :param label: reference to the used label
        :param test_environment: boolean if the test environment is used
        :return json response from salureconnect
        """
        response = requests.get(url=f'{self.url}connector/{system}', headers=self.__get_headers())
        response.raise_for_status()
        credentials = response.json()
        # Normalise the label parameter to a list for uniform filtering
        labels = [label] if isinstance(label, str) else label
        # All requested labels must be present on a credential for it to match
        credentials = [credential for credential in credentials if all(wanted in credential['labels'] for wanted in labels)]
        if system == 'profit':
            # Profit credentials are additionally split into test and production environments
            credentials = [credential for credential in credentials if credential['isTestEnvironment'] is test_environment]
        if len(credentials) == 0:
            raise ValueError(f'No credentials found for {system}')
        if len(credentials) != 1:
            raise ValueError(f'Multiple credentials found for {system} with the specified labels')
        return credentials[0]

    def refresh_system_credential(self, system: str, system_id: int) -> json:
        """
        This method refreshes Oauth authentication credentials in salureconnect.
        It returns the json data if the request does not return an error code
        :param system: specifies which token is used. (lowercase)
        :param system_id: system id in salureconnect
        :return json response from salureconnect
        """
        response = requests.post(url=f'{self.url}connector/{system}/{system_id}/refresh', headers=self.__get_headers())
        response.raise_for_status()
        credentials = response.json()
        return credentials

    def list_files(self) -> json:
        """
        This method is to list the available files from the SalureConnect API
        :return json with file metadata
        """
        response = requests.get(url=f"{self.url}file-storage/files", headers=self.__get_headers())
        response.raise_for_status()
        return response.json()

    def download_files(self, output_path: os.PathLike, filter_upload_definition_ids: List = None, filter_file_names: List = None, filter_deleted=False):
        """
        This method can be used to download multiple files from salureconnect at once.
        :param output_path: folder in which to save the downloaded files
        :param filter_upload_definition_ids: filter files on specific file definitions
        :param filter_file_names: filter files on specific filenames
        :param filter_deleted: filter boolean if you want to retrieve deleted files as well
        """
        response = requests.get(url=f"{self.url}file-storage/files", headers=self.__get_headers())
        response.raise_for_status()
        files = response.json()
        for file_object in files:
            # Only download file(s) that pass all configured filters
            if (filter_upload_definition_ids is None or file_object['fileuploadDefinition']['id'] in filter_upload_definition_ids) and \
                    (filter_file_names is None or file_object['file_name'] in filter_file_names) and pd.isnull(file_object['deleted_at']) is not filter_deleted:
                file_string = requests.get(url=f"{self.url}file-storage/files/{file_object['id']}/download", headers=self.__get_headers())
                # Bug fix: fail on a bad download instead of silently writing the error body to disk
                file_string.raise_for_status()
                with open(f"{output_path}{file_object['file_name']}", mode='wb') as file:
                    file.write(file_string.content)

    def download_file(self, file_id: int, file_name_and_path: str):
        """
        This method downloads a specific file to the specified path. The file is identified by the file_id parameter.
        :param file_id: file id that the file is identified by in SalureConnect
        :param file_name_and_path: target path (including the file name) to write the file to
        """
        response = requests.get(url=f"{self.url}file-storage/files/{file_id}/download", headers=self.__get_headers())
        response.raise_for_status()
        with open(file_name_and_path, mode='wb') as file:
            file.write(response.content)
import urllib.parse
import requests
import json
from typing import Union, List
class Salesforce(object):
    """
    Simple wrapper around the Salesforce REST API. To start using it, authorize your application in
    SalureConnect. You will receive a code which you can exchange for a refresh token with
    get_refresh_token. Use that refresh token to refresh your access token before every data call.
    """

    def __init__(self, customer_url: str, client_id: str, client_secret: str, redirect_uri: str = None,):
        self.customer_url = customer_url
        self.client_id = client_id
        self.client_secret = client_secret
        # Default to the SalureConnect OAuth2 callback when no explicit redirect URI is supplied.
        self.redirect_uri = 'https://salureconnect.com/api/manage/connectors/oauth2/salesforce' if redirect_uri is None else redirect_uri
        self.access_token = ''

    def get_refresh_token(self, authorization_code: str) -> requests.Response:
        """
        This method is for one time use. After obtaining the code from SalureConnect, exchange it
        for a refresh token.
        :param authorization_code: code you obtained from SalureConnect after going through the OAuth flow
        :return: response whose JSON body contains the refresh token
        """
        params = {
            "grant_type": "authorization_code",
            "client_id": self.client_id,
            "client_secret": self.client_secret,
            "redirect_uri": self.redirect_uri,
            "code": authorization_code
        }
        # The OAuth 2.0 token endpoint requires POST (RFC 6749 section 3.2); Salesforce rejects GET
        # requests to /services/oauth2/token, so the original GET call could never succeed.
        response = requests.post(url="https://login.salesforce.com/services/oauth2/token", data=params)
        return response

    def refresh_access_token(self, refresh_token: str):
        """
        Obtain a new access token and store it on this instance.
        :param refresh_token: refresh token obtained from the get_refresh_token method
        :return: nothing; the access_token attribute is set to the latest access token
        :raises ConnectionError: when the token endpoint does not answer with HTTP 200
        """
        params = {
            "grant_type": "refresh_token",
            "client_id": self.client_id,
            "refresh_token": refresh_token
        }
        # Token endpoint requires POST, see get_refresh_token.
        response = requests.post(url="https://login.salesforce.com/services/oauth2/token", data=params)
        if response.status_code == 200:
            self.access_token = response.json()['access_token']
        else:
            raise ConnectionError(f"There was a problem getting the access token. Response is: {response}")

    def query_data(self, query: str) -> requests.Response:
        """
        Send a raw SOQL query to Salesforce.
        :param query: query string. Something like: 'select+Name,Id+from+Account'
        :return: response with data or error
        """
        headers = {"Authorization": f"Bearer {self.access_token}"}
        params = {
            "q": query
        }
        # Keep ':' and '+' unescaped: the query string is expected to be pre-encoded by the caller.
        params_str = urllib.parse.urlencode(params, safe=':+')
        response = requests.request(method="GET", url=f"{self.customer_url}services/data/v37.0/query/?", params=params_str, headers=headers)
        return response

    def get_data(self, fields: Union[str, List], object_name: str, filter: str = None) -> requests.Response:
        """
        Query Salesforce in a somewhat user-friendly way.
        :param fields: field name, or list of field names, to retrieve
        :param object_name: table or object name that the fields need to be retrieved from
        :param filter: SOQL WHERE clause content (a statement that evaluates to True or False)
        :return: response with data or error
        """
        headers = {"Authorization": f"Bearer {self.access_token}"}
        fields = ",".join(fields) if isinstance(fields, List) else fields
        params = {
            "q": f"SELECT {fields} FROM {object_name}{' WHERE ' + filter if filter is not None else ''}"
        }
        params_str = urllib.parse.urlencode(params, safe=':+')
        response = requests.get(url=f"{self.customer_url}services/data/v37.0/query/?", params=params_str, headers=headers)
        return response
import base64
import asyncio
import time
import aiohttp
from typing import List
from salure_helpers import SalureConnect
class ProfitExtractAsync:
    """Asynchronous client for extracting data from AFAS Profit GetConnectors."""

    def __init__(self, salureconnect_connection: SalureConnect, label: str, test_environment: bool = False, debug=False):
        """
        :param salureconnect_connection: SalureConnect client used to fetch the Profit credentials
        :param label: label of the Profit credential in SalureConnect
        :param test_environment: if True, use the AFAS test endpoint (resttest.afas.online)
        :param debug: if True, print progress information for every request
        """
        self.salureconnect_connection = salureconnect_connection
        if test_environment:
            self.base_url = 'resttest.afas.online'
        else:
            self.base_url = 'rest.afas.online'
        credentials = self.salureconnect_connection.get_system_credential(system='profit', label=label, test_environment=test_environment)
        self.environment = credentials['environment']
        base64token = base64.b64encode(credentials['token'].encode('utf-8')).decode()
        self.headers = {'Authorization': 'AfasToken ' + base64token}
        # Shared between the batched requests of a single get_data call; set by the request that
        # receives fewer rows than `take`. NOTE(review): because this lives on the instance,
        # concurrent get_data calls on the same instance would interfere with each other.
        self.got_all_results = False
        self.debug = debug

    async def get_data(self, connectors: List = None, parameters: dict = None, batch_size: int = 8, take: int = 40000) -> dict:
        """
        This (asynchronous) function functions as a wrapper that can carry out multiple single get requests to be able
        to get all data from profit in an asynchronous and efficient way. Only use this function in async code, otherwise use the profit class to call this from a sync function.
        :param connectors: Names of the connectors to be extracted. If not provided, keys of the parameters dict will be used
        :param parameters: multilevel dict of filters per connector. Key must always be the connector, then dict like {connector: {"filterfieldids": fields, "filtervalues": values, "operatortypes": operators}
        :param batch_size: number of requests fired concurrently per connector before checking for completion
        :param take: page size requested from Profit per request
        :return: data in json format
        """
        # Avoid the mutable-default-argument pitfall (the original used `parameters: dict = {}`).
        parameters = {} if parameters is None else parameters
        # Honour the documented fallback: derive the connector list from the parameters dict when
        # no explicit list is given (the original crashed with a TypeError on connectors=None).
        if connectors is None:
            connectors = list(parameters.keys())
        url = f'https://{self.environment}.{self.base_url}/profitrestservices/connectors/'
        batch_number = 0
        total_response = {}
        self.got_all_results = False
        while not self.got_all_results:
            async with aiohttp.ClientSession(headers=self.headers, timeout=aiohttp.ClientTimeout()) as session:
                # One page request per (batch slot, connector); skip advances by whole batches.
                requests = [self.get_request(url=url,
                                             connector=connector,
                                             params={**(parameters[connector] if connector in parameters.keys() else {}), **{
                                                 "skip": take * (i + batch_number * batch_size),
                                                 "take": take}},
                                             session=session,
                                             take=take) for i in range(batch_size) for connector in connectors]
                response = await asyncio.gather(*requests, return_exceptions=False)
                # Flatten response (multiple dicts with the same key (connectorname) and different values are returned)
                for item in response:
                    for key, value in item.items():
                        if key in total_response.keys():
                            total_response[key].extend(value)
                        else:
                            total_response[key] = value
                batch_number += 1
        return total_response

    async def get_request(self, url: str, connector: str, params: dict, session: aiohttp.ClientSession, take: int):
        """
        This function carries out a single get request given the inputs. It is used as input for the abovementioned wrapper,
        get_data. Note that this function cannot be called by itself, but has to be started via get_data.
        :param url: profit url to retrieve the data
        :param connector: name of the connector to query
        :param params: query parameters of the request (filters plus skip/take)
        :param session: shared aiohttp client session
        :param take: page size; receiving fewer rows than this marks the extraction as complete
        :return: dict mapping the connector name to the retrieved rows
        """
        if self.debug:
            print(f"started request for {connector} at: {time.time()}")
        async with session.get(url=f"{url}{connector}", params=params) as resp:
            if resp.status >= 400:
                raise ConnectionError(f"Got error: {resp.status, await resp.text()} while retrieving data from connector {connector}")
            response = await resp.json()
            response = response['rows']
            # A short page means Profit has no further rows for this connector.
            if len(response) < take:
                if self.debug:
                    print(f"request with params: {params} was the last request with {len(response)} rows")
                self.got_all_results = True
            else:
                if self.debug:
                    print(f"request with params: {params} has {len(response)} rows")
            return {connector: response}

    async def get_meta_data(self, connector: str = None):
        """
        This function makes sure that you can create a list of connector names without having to call another class.
        :param connector: optional connector name; when given, field metadata for that connector is returned instead
        :return: a list of all connectors in the environment, or the fields of one connector
        """
        url = f"https://{self.environment}.{self.base_url}/profitrestservices/metainfo{f'/get/{connector}' if connector is not None else ''}"
        async with aiohttp.ClientSession(headers=self.headers, timeout=aiohttp.ClientTimeout(), raise_for_status=True) as session:
            async with session.get(url=f"{url}") as resp:
                if resp.status >= 400:
                    raise ConnectionError(f"Got error: {resp.status, await resp.text()} while retrieving data from connector {connector}")
                response = await resp.json()
                response = response[f"{'getConnectors' if connector is None else 'fields'}"]
                return response
from collections import OrderedDict
import os
import apache_beam as beam
import tensorflow as tf
import pymysql
from typing import Any, Dict, List, Text
from tfx import types
from tfx.components.base import base_executor
from tfx.types import artifact_utils
from salure_tfx_extensions.proto import mysql_config_pb2
from tensorflow_serving.apis import prediction_log_pb2
from tfx.utils import io_utils
from google.protobuf import json_format
import numpy as np
# Telemetry descriptor tag for Beam metrics emitted by this component.
_TELEMETRY_DESCRIPTORS = ['MySQLPusher']
# Exec-properties key for a user-supplied export function (not referenced in this module).
CUSTOM_EXPORT_FN = 'custom_export_fn'
# Exec-properties key for a module file (not referenced in this module).
_MODULE_FILE_KEY = 'module_file'
# File name prefix for the JSON prediction logs written next to the inference results.
_PREDICTION_LOGS_FILE_NAME = 'prediction_logs'
class Executor(base_executor.BaseExecutor):
    """
    Executor that loads in inference results and calculated percentile values,
    labels each prediction with its percentile bucket, and writes the results
    both to a JSON file and to a MySQL table.
    """

    def Do(self, input_dict: Dict[Text, List[types.Artifact]],
           output_dict: Dict[Text, List[types.Artifact]],
           exec_properties: Dict[Text, Any]) -> None:
        """
        Args:
          input_dict: Input dict from input key to a list of Artifacts.
            - inference_result: PredictionLog TFRecords produced upstream.
            - percentile_values: artifact directory containing percentile_values.txt.
          output_dict: Output dict from output key to a list of Artifacts (unused here).
          exec_properties: A dict of execution properties, forwarded to the MySQL sink
            (connection_config, table_name, inference_id).
        Returns:
          None
        """
        self._log_startup(input_dict, output_dict, exec_properties)
        predictions = artifact_utils.get_single_instance(input_dict['inference_result'])
        predictions_path = predictions.uri
        predictions_uri = io_utils.all_files_pattern(predictions_path)
        percentile_values_uri = artifact_utils.get_single_uri(input_dict['percentile_values'])
        percentile_values = io_utils.read_string_file(os.path.join(percentile_values_uri, 'percentile_values.txt'))
        # NOTE(review): eval() parses the text file back into a Python list. This is only safe
        # because the file is produced by the trusted percentile component; never point this at
        # untrusted input. ast.literal_eval would be the safer parser.
        percentile_values = eval(percentile_values)
        with self._make_beam_pipeline() as pipeline:
            # Decode each PredictionLog and enrich it with score + percentile label.
            data = (pipeline
                    | 'ReadPredictionLogs' >> beam.io.ReadFromTFRecord(
                        predictions_uri,
                        coder=beam.coders.ProtoCoder(prediction_log_pb2.PredictionLog))
                    | 'ParsePredictionLogs' >> beam.Map(parse_predictlog, percentile_values))
            # Sink 1: single-shard JSON file next to the inference results.
            _ = (data
                 | 'WritePredictionLogs' >> beam.io.WriteToText(
                     file_path_prefix=os.path.join(predictions_path, _PREDICTION_LOGS_FILE_NAME),
                     num_shards=1,
                     file_name_suffix=".json"))
            # NOTE(review): the f-string below formats a (path, name) tuple, not a joined path.
            print(f"Json format prediction results saved to {predictions_path, _PREDICTION_LOGS_FILE_NAME}")
            # Sink 2: MySQL table, configured through exec_properties.
            _ = (data
                 | 'Write To MySQL db' >> _ExampleToMySQL(exec_properties))
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
def _ExampleToMySQL(
        pipeline: beam.Pipeline,
        exec_properties: Dict[Text, Any]):
    """PTransform that writes parsed prediction dicts to a MySQL table.

    exec_properties must provide 'connection_config' (serialized MySQLConnConfig),
    'table_name' and 'inference_id'.
    """
    mysql_config = mysql_config_pb2.MySQLConnConfig()
    json_format.Parse(exec_properties['connection_config'], mysql_config)
    table_name = exec_properties['table_name']
    inference_id = exec_properties['inference_id']
    return (pipeline
            | 'WriteMySQLDoFN' >> beam.ParDo(_WriteMySQLDoFn(mysql_config, table_name, inference_id)))
class _WriteMySQLDoFn(beam.DoFn):
    """DoFn that buffers rows per bundle and inserts them into MySQL in one statement.

    Inspired by:
    https://github.com/esakik/beam-mysql-connector/blob/master/beam_mysql/connector/io.py"""

    def __init__(self,
                 mysql_config: mysql_config_pb2.MySQLConnConfig,
                 table_name,
                 inference_id):
        super(_WriteMySQLDoFn, self).__init__()
        # Stored as a plain dict so it can be splatted into pymysql.connect(**...).
        self.mysql_config = json_format.MessageToDict(mysql_config)
        self.table_name = table_name
        self.inference_id = inference_id

    def start_bundle(self):
        # NOTE(review): initialized as a list but overwritten with a string in process().
        self._column_str = []
        self._values = []

    def process(self, element, *args, **kwargs):
        columns = []
        values = []
        # Translate the Dutch source column names to the target table's English columns;
        # anything not in the allow-list below is dropped.
        for column, value in element.items():
            if column not in ['periode', 'periode_uitgevoerd', 'medewerker_id', 'looncomponent_extern_nummer',
                              'werkgever_id', 'boekjaar', 'bedrag', 'score', 'predict_label',
                              'salarisverwerkingsplan_id']:
                continue
            elif column == 'periode':
                col = 'period'
            elif column == 'periode_uitgevoerd':
                col = 'period_executed'
            elif column == 'salarisverwerkingsplan_id':
                col = 'salarisverwerkingsplan_id'
            elif column == 'boekjaar':
                col = 'year'
            elif column == 'looncomponent_extern_nummer':
                col = 'wagecomponent_id'
            elif column == 'werkgever_id':
                col = 'employer_id'
            elif column == 'medewerker_id':
                col = 'employee_id'
            elif column == 'bedrag':
                col = 'amount'
            elif column == 'predict_label':
                col = 'label'
            elif column == 'score':
                col = 'score'
            columns.append(col)
            values.append(value)
        columns.append('inference_id')
        values.append(self.inference_id)
        # Render one "(v1, v2, ...)" tuple: NULL for None, bare literals for numbers,
        # single-quoted otherwise.
        # SECURITY NOTE(review): values are interpolated into the SQL string without escaping;
        # a string value containing a single quote breaks the statement and this is injectable.
        # Prefer cursor.executemany with %s placeholders.
        value_str = ", ".join(
            [
                f"{'NULL' if value is None else value}" if isinstance(value, (type(None), int, float)) else f"'{value}'"
                for value in values
            ]
        )
        self._values.append("(" + value_str + ")")
        self._column_str = "(" + ", ".join(columns) + ")"

    def finish_bundle(self):
        # Flush the whole bundle in a single multi-row INSERT.
        if len(self._values):
            value_str = ", ".join(self._values)
            query = f"INSERT INTO {self.mysql_config['database']}.{self.table_name} {self._column_str} VALUES {value_str};"
            client = pymysql.connect(**self.mysql_config)
            cursor = client.cursor()
            cursor.execute(query)
            self._values.clear()
            self._column_str = ""
            client.commit()
            cursor.close()
            client.close()
def parse_score(predict_val, percentile_values):
    """Map a raw prediction value onto a coarse percentile bucket.

    Buckets: 0 -> 0-50%, 1 -> 50-75%, 2 -> 75-85%, 3 -> 85-95%, 4 -> >95%.

    :param predict_val: scalar prediction value to rank
    :param percentile_values: sorted list of percentile boundary values
    :return: bucket index in 0..4
    """
    bucket_count = len(percentile_values) - 1
    ranked = percentile_values.copy()
    ranked.append(predict_val)
    # Rank of the prediction among the boundaries: the sorted position of the
    # appended element (whose original index is len(percentile_values)).
    rank = list(np.argsort(ranked)).index(len(percentile_values))
    if rank <= int(bucket_count / 2):
        return 0  # "0-50%"
    if rank <= int(bucket_count * 75 / 100):
        return 1  # "50-75%"
    if rank <= int(bucket_count * 85 / 100):
        return 2  # "75-85%"
    if rank <= int(bucket_count * 95 / 100):
        return 3  # "85-95%"
    return 4  # ">95%"
def parse_predictlog(pb, percentile_values):
    """Turn one PredictionLog into a sorted dict of input features plus score and label.

    :param pb: a prediction_log_pb2.PredictionLog protobuf
    :param percentile_values: percentile boundaries used to bucket the score
    :return: OrderedDict of feature name -> value, sorted by name, including
        'score' (raw prediction) and 'predict_label' (percentile bucket)
    :raises ValueError: when the response tensor holds no recognisable value
    """
    predict_val = None
    response_tensor = pb.predict_log.response.outputs["output"]
    # A TensorProto stores its payload in exactly one of these typed value lists;
    # probe them in a fixed order and take the first non-empty one.
    for value_field in ("half_val", "float_val", "double_val", "int_val", "string_val",
                       "int64_val", "bool_val", "uint32_val", "uint64_val"):
        values = getattr(response_tensor, value_field)
        if len(values) != 0:
            predict_val = values[0]
            break
    if predict_val is None:
        # BUG FIX: the original constructed this ValueError without raising it, so an
        # unrecognised tensor silently propagated None into parse_score.
        raise ValueError("Encountered response tensor with unknown value")
    predict_label = parse_score(predict_val, percentile_values)
    # Recover the original input features from the serialized request example.
    example = pb.predict_log.request.inputs["examples"].string_val[0]
    example = tf.train.Example.FromString(example)
    results = parse_pb(example)
    results['score'] = predict_val
    results['predict_label'] = predict_label
    return OrderedDict(sorted(results.items(), key=lambda t: t[0]))
def parse_pb(pb):
    """Flatten a tf.train.Example into a plain dict of feature name -> first value.

    Only the first entry of each feature's value list is kept; an empty value list
    becomes ''. Bytes values are decoded as UTF-8.
    """
    results = {}
    # pb.features.ListFields() yields (descriptor, feature_map) pairs; each map goes from
    # feature name (kk) to a Feature message whose single set field (vvv) holds the values.
    for f, v in pb.features.ListFields():
        for kk, vv in v.items():
            for kkk, vvv in vv.ListFields():
                if len(vvv.value) == 0:
                    results[kk] = ''
                elif type(vvv.value[0]) == bytes:
                    results[kk] = vvv.value[0].decode("utf-8")
                else:
                    results[kk] = vvv.value[0]
    return results
from tensorflow_serving.apis import prediction_log_pb2
from tfx.components.base import base_executor
import apache_beam as beam
from tfx.types import artifact_utils
from tfx.utils import io_utils
def parse_predictlog(pb):
    """Extract the scalar prediction value from a PredictionLog protobuf.

    :param pb: a prediction_log_pb2.PredictionLog (or any object with the same shape)
    :return: the first value found in the response tensor's typed value lists
    :raises ValueError: when the response tensor holds no recognisable value
    """
    response_tensor = pb.predict_log.response.outputs["output"]
    # A TensorProto stores its payload in exactly one of these typed value lists;
    # probe them in a fixed order and return the first non-empty one.
    for value_field in ("half_val", "float_val", "double_val", "int_val", "string_val",
                       "int64_val", "bool_val", "uint32_val", "uint64_val"):
        values = getattr(response_tensor, value_field)
        if len(values) != 0:
            return values[0]
    # BUG FIX: the original constructed this ValueError without raising it, silently
    # returning None for an unrecognised tensor.
    raise ValueError("Encountered response tensor with unknown value")
class Executor(base_executor.BaseExecutor):
    """Executor that computes approximate quantile boundaries of the prediction
    values in an inference result and writes them to a text file."""

    def Do(self, input_dict, output_dict, exec_properties):
        """Reads PredictionLogs, extracts scalar scores, and writes the quantile list.

        input_dict['inference_result']: PredictionLog TFRecords.
        output_dict['percentile_values']: directory receiving percentile_values.txt.
        exec_properties['num_quantiles']: requested number of quantile buckets (string/int).
        """
        self._log_startup(input_dict, output_dict, exec_properties)
        predictions = artifact_utils.get_single_instance(input_dict['inference_result'])
        predictions_path = predictions.uri
        predictions_uri = io_utils.all_files_pattern(predictions_path)
        # +1 so the requested number of buckets is delimited by num_quantiles+1 boundary
        # values -- presumably including min and max; TODO confirm against
        # beam ApproximateQuantiles semantics.
        num_quantiles = int(exec_properties['num_quantiles']) + 1
        output_examples_uri = artifact_utils.get_single_uri(output_dict['percentile_values'])
        with beam.Pipeline() as pipeline:
            train_data = (pipeline
                          | 'ReadPredictionLogs' >> beam.io.ReadFromTFRecord(
                              predictions_uri,
                              coder=beam.coders.ProtoCoder(prediction_log_pb2.PredictionLog))
                          | 'ParsePredictionLogs' >> beam.Map(parse_predictlog))
            # The quantile list is written as a single unsharded text file, read back
            # downstream with eval() by the MySQL pusher component.
            quantiles = (train_data | 'Quantiles globally' >> beam.transforms.stats.ApproximateQuantiles.Globally(
                num_quantiles=num_quantiles)
                         | 'WriteToFile' >> beam.io.WriteToText(
                             file_path_prefix=output_examples_uri + '/percentile_values',
                             shard_name_template='',
                             file_name_suffix='.txt'))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
from absl import logging
from tfx import types
from tfx.components.example_gen import component
from salure_tfx_extensions.components.csv_example_gen import executor
from tfx.dsl.components.base import executor_spec
from tfx.proto import example_gen_pb2
from tfx.proto import range_config_pb2
from tfx.types import artifact_utils
class CsvExampleGen(component.FileBasedExampleGen):  # pylint: disable=protected-access
    """Custom TFX CsvExampleGen component that pins explicit dtypes for specific columns.

    Takes CSV data and produces train and eval tf.Example splits for downstream
    components. Column values are encoded as int/float/bytes features; when a cell
    is missing, the encoding depends on whether a type could be inferred:

    -- tf.train.Feature(`type`_list=tf.train.`type`List(value=[])) when the `type`
       can be inferred.
    -- tf.train.Feature() when the `type` cannot be inferred for the column.

    Type inference happens per input split, so with pre-split inputs the column
    types must agree across the splits. For example, given these rows of a split:

      header:A,B,C,D
      row1: 1,,x,0.1
      row2: 2,,y,0.2
      row3: 3,,,0.3
      row4:

    the output is:

      example1: 1(int), empty feature(no type), x(string), 0.1(float)
      example2: 2(int), empty feature(no type), x(string), 0.2(float)
      example3: 3(int), empty feature(no type), empty list(string), 0.3(float)

    where the empty feature is `tf.train.Feature()` and the empty string list is
    `tf.train.Feature(bytes_list=tf.train.BytesList(value=[]))`.
    """

    EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

    def __init__(
        self,
        input: Optional[types.Channel] = None,  # pylint: disable=redefined-builtin
        input_base: Optional[Text] = None,
        input_config: Optional[Union[example_gen_pb2.Input, Dict[Text,
                                                                 Any]]] = None,
        output_config: Optional[Union[example_gen_pb2.Output, Dict[Text,
                                                                   Any]]] = None,
        range_config: Optional[Union[range_config_pb2.RangeConfig,
                                     Dict[Text, Any]]] = None,
        example_artifacts: Optional[types.Channel] = None,
        instance_name: Optional[Text] = None):
        """Construct a CsvExampleGen component.

        Args:
          input: A Channel of type `standard_artifacts.ExternalArtifact` whose single
            artifact uri points at an external directory of CSV files. (Deprecated in
            favour of input_base.)
          input_base: an external directory containing the CSV files.
          input_config: An example_gen_pb2.Input instance with the input configuration.
            When unset, the files under input_base are treated as a single split. When
            any field is given as a RuntimeParameter, pass a dict with the same field
            names as the Input proto message.
          output_config: An example_gen_pb2.Output instance with the output
            configuration. When unset, splits default to 'train' and 'eval' with a 2:1
            size ratio. When any field is given as a RuntimeParameter, pass a dict with
            the same field names as the Output proto message.
          range_config: An optional range_config_pb2.RangeConfig restricting the span
            values to consider. When unset, the driver searches for the latest span
            without restrictions.
          example_artifacts: Optional channel of 'ExamplesPath' for the output train
            and eval examples.
          instance_name: Optional unique instance name; required when multiple
            CsvExampleGen components exist in the same pipeline.
        """
        # Legacy path: resolve the deprecated `input` channel into an input_base uri.
        if input:
            input_base = artifact_utils.get_single_uri(list(input.get()))
        super().__init__(
            input_base=input_base,
            input_config=input_config,
            output_config=output_config,
            range_config=range_config,
            example_artifacts=example_artifacts,
            instance_name=instance_name)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Dict, Iterable, List, Text
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx.components.example_gen import utils
from tfx.components.example_gen.base_example_gen_executor import BaseExampleGenExecutor
from tfx.dsl.io import fileio
from tfx.utils import io_utils
from tfx_bsl.coders import csv_decoder
def _int_handler(cell: csv_decoder.CSVCell) -> tf.train.Feature:
    """Encode a CSV cell as an int64 Feature; an empty cell yields an empty value list."""
    values = [int(cell)] if cell else []
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _float_handler(cell: csv_decoder.CSVCell) -> tf.train.Feature:
    """Encode a CSV cell as a float Feature; an empty cell yields an empty value list."""
    values = [float(cell)] if cell else []
    return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def _bytes_handler(cell: csv_decoder.CSVCell) -> tf.train.Feature:
    """Encode a CSV cell as a bytes Feature; an empty cell yields an empty value list."""
    values = [cell] if cell else []
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
@beam.typehints.with_input_types(List[csv_decoder.CSVCell],
                                 List[csv_decoder.ColumnInfo])
@beam.typehints.with_output_types(tf.train.Example)
class _ParsedCsvToTfExample(beam.DoFn):
    """A beam.DoFn to convert a parsed CSV line to a tf.Example."""

    def __init__(self):
        # Built lazily on the first process() call, once the inferred column-info
        # side input is available.
        self._column_handlers = None

    def _make_column_handlers(self, column_infos: List[csv_decoder.ColumnInfo]):
        """Pair each column name with a Feature-builder chosen from its inferred type."""
        result = []
        for column_info in column_infos:
            # pylint: disable=g-long-lambda
            # These id-like columns are forced to bytes regardless of their inferred
            # type, so numeric-looking ids keep their exact textual representation.
            if column_info.name in ['medewerker_id', 'looncomponent_extern_nummer','werkgever_id','salarisverwerkingsplan_id']:
                handler_fn = _bytes_handler
            elif column_info.type == csv_decoder.ColumnType.INT:
                handler_fn = _int_handler
            elif column_info.type == csv_decoder.ColumnType.FLOAT:
                handler_fn = _float_handler
            elif column_info.type == csv_decoder.ColumnType.STRING:
                handler_fn = _bytes_handler
            else:
                # Unknown column type: encode as an untyped empty tf.train.Feature().
                handler_fn = None
            result.append((column_info.name, handler_fn))
        return result

    def process(
        self, csv_cells: List[csv_decoder.CSVCell],
        column_infos: List[csv_decoder.ColumnInfo]) -> Iterable[tf.train.Example]:
        if not self._column_handlers:
            self._column_handlers = self._make_column_handlers(column_infos)
        # skip blank lines.
        if not csv_cells:
            return
        if len(csv_cells) != len(self._column_handlers):
            raise ValueError('Invalid CSV line: {}'.format(csv_cells))
        feature = {}
        for csv_cell, (column_name, handler_fn) in zip(csv_cells,
                                                       self._column_handlers):
            feature[column_name] = (
                handler_fn(csv_cell) if handler_fn else tf.train.Feature())
        yield tf.train.Example(features=tf.train.Features(feature=feature))
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(tf.train.Example)
def _CsvToExample(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline, exec_properties: Dict[Text, Any],
    split_pattern: Text) -> beam.pvalue.PCollection:
    """Read CSV files and transform to TF examples.

    Note that each input split will be transformed by this function separately.

    Args:
      pipeline: beam pipeline.
      exec_properties: A dict of execution properties.
        - input_base: input dir that contains CSV data. CSV must have header line.
      split_pattern: Split.pattern in Input config, glob relative file pattern
        that maps to input files with root directory given by input_base.

    Returns:
      PCollection of TF examples.

    Raises:
      RuntimeError: if split is empty or csv headers are not equal.
    """
    input_base_uri = exec_properties[utils.INPUT_BASE_KEY]
    csv_pattern = os.path.join(input_base_uri, split_pattern)
    logging.info('Processing input csv data %s to TFExample.', csv_pattern)
    csv_files = fileio.glob(csv_pattern)
    if not csv_files:
        raise RuntimeError(
            'Split pattern {} does not match any files.'.format(csv_pattern))
    # All files of a split must share the exact same header, since one set of
    # column handlers is inferred for the whole split.
    column_names = io_utils.load_csv_column_names(csv_files[0])
    for csv_file in csv_files[1:]:
        if io_utils.load_csv_column_names(csv_file) != column_names:
            raise RuntimeError(
                'Files in same split {} have different header.'.format(csv_pattern))
    parsed_csv_lines = (
        pipeline
        | 'ReadFromText' >> beam.io.ReadFromText(
            file_pattern=csv_pattern, skip_header_lines=1)
        | 'ParseCSVLine' >> beam.ParDo(csv_decoder.ParseCSVLine(delimiter=','))
        | 'ExtractParsedCSVLines' >> beam.Keys())
    # Column types are inferred over the whole split and passed to the DoFn as a
    # singleton side input.
    column_infos = beam.pvalue.AsSingleton(
        parsed_csv_lines
        | 'InferColumnTypes' >> beam.CombineGlobally(
            csv_decoder.ColumnTypeInferrer(column_names, skip_blank_lines=True)))
    return (parsed_csv_lines
            | 'ToTFExample' >> beam.ParDo(_ParsedCsvToTfExample(), column_infos))
class Executor(BaseExampleGenExecutor):
    """Generic TFX CSV example gen executor."""

    def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
        """Returns PTransform for CSV to TF examples."""
        # The base executor applies the returned PTransform once per input split.
        return _CsvToExample
from typing import Text
from tfx.types import ComponentSpec
from tfx.types.component_spec import ChannelParameter, ExecutionParameter
from tfx.types import standard_artifacts
from tfx.types.artifact import Artifact
from tfx.proto import example_gen_pb2
from salure_tfx_extensions.types import standard_artifacts as stfxe_artifacts
from salure_tfx_extensions.proto import mysql_config_pb2
class BaseSpec(ComponentSpec):
    """salure_tfx_extensions BaseComponent spec: Examples in, Examples out, with
    ExampleGen-style input/output split configuration."""
    PARAMETERS = {
        'input_config': ExecutionParameter(type=example_gen_pb2.Input),
        'output_config': ExecutionParameter(type=example_gen_pb2.Output),
    }
    INPUTS = {
        'examples': ChannelParameter(type=standard_artifacts.Examples)
    }
    OUTPUTS = {
        'output_examples': ChannelParameter(type=standard_artifacts.Examples)
    }
class LCEmbeddingSpec(ComponentSpec):
    """ComponentSpec for the LCEmbedding component."""
    PARAMETERS = {
        # These are parameters that will be passed in the call to create an instance of this component.
        # 'name': ExecutionParameter(type=Text),
        'mapping_file_path': ExecutionParameter(type=(str, Text)),
        'feature_description': ExecutionParameter(type=(str, Text))  # new parameter for reading f_desc_emb.json
    }
    INPUTS = {
        # This will be a dictionary with input artifacts, including URIs
        'input_data': ChannelParameter(type=standard_artifacts.Examples),
    }
    OUTPUTS = {
        # This will be a dictionary which this component will populate
        'output_data': ChannelParameter(type=standard_artifacts.Examples),
    }
class CopyFileSpec(ComponentSpec):
    """ComponentSpec for the CopyFile component: copies input_path to output_path.

    Uses no artifact channels; both locations are plain path parameters."""
    PARAMETERS = {
        # These are parameters that will be passed in the call to create an instance of this component.
        # 'name': ExecutionParameter(type=Text),
        'input_path': ExecutionParameter(type=(str, Text)),
        'output_path': ExecutionParameter(type=(str, Text))
    }
    INPUTS = {
        # This will be a dictionary with input artifacts, including URIs
    }
    OUTPUTS = {
        # This will be a dictionary which this component will populate
    }
class PercentileComponentSpec(ComponentSpec):
    """ComponentSpec for the Percentile component: computes quantile boundaries
    of an inference result."""
    PARAMETERS = {
        # Number of quantile buckets; passed as text and converted by the executor.
        'num_quantiles': ExecutionParameter(type=Text),
    }
    INPUTS = {
        'inference_result': ChannelParameter(type=standard_artifacts.InferenceResult),
    }
    OUTPUTS = {
        # This will be a dictionary which this component will populate
        'percentile_values': ChannelParameter(type=standard_artifacts.Examples),
    }
class CsvToExampleSpec(ComponentSpec):
    """ComponentSpec for the CsvToExample component: reads CSVs from input_path
    and emits tf.Examples."""
    PARAMETERS = {
        'feature_description': ExecutionParameter(type=(str, Text)),
        'input_path': ExecutionParameter(type=(str, Text))
    }
    INPUTS = {
    }
    OUTPUTS = {
        'examples':
            ChannelParameter(type=standard_artifacts.Examples)
    }
class UploadedfilesEmbeddingSpec(ComponentSpec):
    """ComponentSpec for the custom TFX UploadedfilesEmbedding component."""
    PARAMETERS = {
        # These are parameters that will be passed in the call to create an instance of this component.
        'feature_description': ExecutionParameter(type=(str, Text)),  # new parameter for reading f_desc_upl.json
    }
    INPUTS = {
        # This will be a dictionary with input artifacts, including URIs
        'input_data': ChannelParameter(type=standard_artifacts.Examples),
        'mapping_data': ChannelParameter(type=standard_artifacts.Examples)
    }
    OUTPUTS = {
        # This will be a dictionary which this component will populate
        'output_data': ChannelParameter(type=standard_artifacts.Examples),
    }
class MySQLPusherSpec(ComponentSpec):
    """salure_tfx_extensions MySQLPusher spec: writes labelled inference results
    to a MySQL table; produces no output artifacts."""
    PARAMETERS = {
        'connection_config': ExecutionParameter(type=mysql_config_pb2.MySQLConnConfig),
        'table_name': ExecutionParameter(type=(str, Text)),
        'inference_id': ExecutionParameter(type=(str, Text))
    }
    INPUTS = {
        'inference_result': ChannelParameter(type=standard_artifacts.InferenceResult),
        'percentile_values': ChannelParameter(type=standard_artifacts.Examples)
    }
    OUTPUTS = dict()
class SKLearnTrainerSpec(ComponentSpec):
    """salure_tfx_extensions SKLearnTrainer spec: fits a pickled sklearn model on
    Examples and emits the transformed examples plus the fitted model."""
    PARAMETERS = {
        # The model is shipped as a pickle of the unfitted estimator.
        'model_pickle': ExecutionParameter(type=(bytes, Text)),
        'label_name': ExecutionParameter(type=(str, Text)),  # If None: unsupervised
    }
    INPUTS = {
        'examples': ChannelParameter(type=standard_artifacts.Examples),
        'schema': ChannelParameter(type=standard_artifacts.Schema),
    }
    OUTPUTS = {
        'transformed_examples': ChannelParameter(type=standard_artifacts.Examples),
        'model': ChannelParameter(type=stfxe_artifacts.SKLearnModel),
    }
class SKLearnTransformSpec(ComponentSpec):
    """salure_tfx_extensions SKLearnTransform spec: applies a pickled sklearn
    preprocessor to Examples."""
    PARAMETERS = {
        # 'module_file': ExecutionParameter(type=(str, Text), optional=True),
        # 'preprocessor_pipeline_name': ExecutionParameter(type=(str, Text), optional=True),
        'preprocessor_pickle': ExecutionParameter(type=(str, Text))
        # 'data_format': ExecutionParameter(type=(str, Text), optional=True),  # Default will be pandas
    }
    INPUTS = {
        'examples': ChannelParameter(type=standard_artifacts.Examples),
        'schema': ChannelParameter(type=standard_artifacts.Schema)
    }
    OUTPUTS = {
        'transformed_examples': ChannelParameter(type=standard_artifacts.Examples),
        'transform_pipeline': ChannelParameter(type=stfxe_artifacts.SKLearnPrepocessor)
    }
class PusherSpec(ComponentSpec):
    """salure_tfx_extensions Pusher spec: pushes any artifact type to a
    destination, optionally gated on model/infra blessings."""
    PARAMETERS = {
        'push_destination': ExecutionParameter(type=(str, Text)),
        # When True, pushed versions are named by timestamp.
        'timestamp_versioning': ExecutionParameter(type=bool)
    }
    INPUTS = {
        'pushable': ChannelParameter(type=Artifact),  # Allow for any artifact type to be pushed
        'model_blessing': ChannelParameter(type=standard_artifacts.ModelBlessing, optional=True),
        'infra_blessing': ChannelParameter(type=standard_artifacts.InfraBlessing, optional=True)
    }
    OUTPUTS = {
        'pushed_model': ChannelParameter(type=standard_artifacts.PushedModel)
    }
from string import Template
from typing import Optional, List, Any
import json
from kfp import dsl
from salure_tfx_extensions.deployments.base_deployment import BaseDeployment
from tfx.components.base.base_component import BaseComponent
class TensorflowDeployment(BaseDeployment):
    """Renders a Seldon Core SeldonDeployment manifest for a TensorFlow model
    stored on a Kubernetes PVC, and exposes it as a kfp ResourceOp."""
    # TODO: Figure out signature name
    # JSON manifest template for the SeldonDeployment custom resource.
    # The $-placeholders are filled by string.Template.substitute in __init__.
    TEMPLATE = Template("""
    {
        "apiVersion": "machinelearning.seldon.io/v1alpha2",
        "kind": "SeldonDeployment",
        "metadata": {
            "name": "$deployment_name"
        },
        "spec": {
            "name": "$deployment_name",
            "predictors": [
                {
                    "graph": {
                        "children": [],
                        "implementation": "TENSORFLOW_SERVER",
                        "modelUri": "pvc://$pvc_name/$model_location",
                        "name": "$deployment_name",
                        "parameters": [
                            {
                                "name": "signature_name",
                                "type": "STRING",
                                "value": "$signature_name"
                            },
                            {
                                "name": "model_name",
                                "type": "STRING",
                                "value": "$model_name"
                            }
                        ]
                    },
                    "name": "$deployment_name",
                    "replicas": 1
                }
            ]
        }
    }
    """)
def __init__(self,
deployment_name: str,
pvc_name: str,
signature_name: str,
model_name: str,
dependents: Optional[List[BaseComponent]] = None,
model_location: Optional[str] = None):
self.deployment_name = deployment_name
self.pvc_name = pvc_name
self.signature_name = signature_name
self.model_name = model_name
self.model_location = model_location or 'data'
self._dependents = dependents
self._deployment = TensorflowDeployment.TEMPLATE.substitute(
deployment_name=deployment_name,
pvc_name=pvc_name,
signature_name=signature_name,
model_name=model_name,
model_location=model_location,
)
# @property
def resource_op(self) -> dsl.ResourceOp:
"""Generates a kfp dsl ResourceOp.
Must be initialized in the runtime of the function passed to the kfp compiler
for the compiler to register it."""
return dsl.ResourceOp(
name=self.deployment_name,
action='apply',
k8s_resource=json.loads(self._deployment),
success_condition='status.state == Available'
)
@property
def dependents(self) -> Optional[List[BaseComponent]]:
return self._dependents | /salure_tfx_extensions-0.2.15.tar.gz/salure_tfx_extensions-0.2.15/salure_tfx_extensions/deployments/tensorflow_deployment.py | 0.580233 | 0.218128 | tensorflow_deployment.py | pypi |
Salvage
=======
.. include:: ../../README.rst
Choosing Parameters
-------------------
Salvage is designed to accomplish two somewhat competing goals: to minimize the
risk both of disclosing and of losing some data. Given:
* :math:`n \equiv` the total number of participants or shares.
* :math:`t \equiv` the number of shares required to recover the data (the
threshold).
* :math:`t' = n - t + 1 \equiv` the number of shares that must be lost to
lose the data.
* :math:`p_d \equiv` the chance of disclosing any given share.
* :math:`p_l \equiv` the chance of losing any given share.
We can calculate the chances of disclosure or loss of the original data as:
* :math:`p_{disc} = 1 - (1 - p_d^t)^\binom{n}{t}`
* :math:`p_{loss} = 1 - (1 - p_l^{t'})^\binom{n}{t'}`
High values of :math:`t` will give you a very low :math:`p_{disc}`, but
:math:`p_{loss}` could easily exceed :math:`p_l` itself. Very low values of
:math:`t` will do the reverse. Unless you're far more concerned with one over
the other, :math:`t` should typically be 40-60% of :math:`n`.
Calculator
~~~~~~~~~~
.. raw:: html
<table id="parameters">
<tbody>
<tr>
<td>Participants:</td>
<td><span id="n-display"></span></td>
<td><div id="n-slider"></div></td>
</tr>
<tr>
<td>Threshold:</td>
<td><span id="t-display"></span></td>
<td><div id="t-slider"></div></td>
</tr>
</tbody>
</table>
<table id="probabilities">
<tbody>
<tr>
<td><label for="p_d">Chance of disclosing one share:</label></td>
<td><input id="p_d" name="p_d"></input> %</td>
<td>Chance of disclosing secure data:</td>
<td><span id="p_disc"></span>%</td>
</tr>
<tr>
<td><label for="p_l">Chance of losing one share:</label></td>
<td><input id="p_l" name="p_l"></input> %</td>
<td>Chance of losing secure data:</td>
<td><span id="p_loss"></span>%</td>
</tr>
</tbody>
</table>
Practical Considerations
------------------------
The risks of disclosure and loss can never be entirely eliminated, but there are
several things that can be done to further reduce them.
Avoiding Disclosure
~~~~~~~~~~~~~~~~~~~
This is the easier one, as all of the usual rules apply. Each share of a salvage
kit should be handled as if it were the raw data. Ideally, it will only exist on
physical media and be stored like any other valuable and sensitive document. You
can always apply extra protection to each share, such as encrypting it with the
public key of the intended recipient.
Depending on your level of paranoia, you might also give some thought to how you
prepare the kit. In order to create it, you need to have the original
information assembled in the clear. If you're doing this on a normal
internet-connected machine, the data may be compromised before you've even
protected it.
Consider using a clean air-gapped machine or booting from a read-only operating
system such as `Tails <https://tails.boum.org/>`_. You might also assemble the
sensitive data in a RAM disk to avoid committing it to any persistent storage.
Similarly, when a new kit is created, all of the pieces are stored together.
Consider where these are being written and try to separate them as soon as
possible.
Avoiding Loss
~~~~~~~~~~~~~
Salvage itself takes several steps to minimize the risk that a kit will become
unrecoverable:
* Every share in a salvage kit includes a full copy of the program that created
it. It is not necessary to download or install any Python packages in order to
run the main script.
* Every share also includes a README with detailed instructions for using
salvage.py. This includes instructions for OS X and Windows users who are not
accustomed to running Python scripts.
* The README in each share also includes detailed instructions for manually
reconstructing the master key and decrypting the data, in case the Python
script can not be run for any reason.
Here are a few additional recommendations for minimizing the risk of ultimate
data loss:
* Store the data well. No digital media lasts forever, but do some research on
the current state of the art. If burning to optical media, buy high quality
media designed for archiving. It's also a good idea to print everything out on
acid-free paper. Ink on paper lasts a long time and OCR scanners are easy to
come by.
* Refresh salvage kits periodically. Consider how long your storage media is
expected to last and regenerate the kit well before that. This is also a good
way to audit the previous kit and make sure none of the shares have gone
missing.
* Test the recovery process. You don't necessarily need to do this with the real
data. Create a sample recovery kit with a nonce and give it to the same people
who hold the real thing. Make sure they can successfully reassemble the test
kit without assistance. Add your own documentation if the standard README is
not sufficient for your needs. (This mainly applies when your audience is not
especially technical).
Technical Details
-----------------
This section has a quick technical description of how salvage works. The
cryptography involved is pretty trivial, so the bulk of the code is concerned
with packaging and logistics. Following is the general procedure used to create
a new salvage kit with :math:`n` participants and a threshold of :math:`t`.
#. The source data is archived, compressed, and encrypted with a random 128-bit
key (rendered to a string for `gpg`_). We also use the key to generate a
SHA-256-HMAC of the unencrypted archive.
#. For every unique set of :math:`t` participants (of which there are
:math:`\binom{n}{t}`), :math:`t - 1` random keys are generated. These are
combined with the master key by xoring the bytes to produce a final random
key. We now have :math:`t` partial keys that xor to the master key. This can
be visualized as a partially filled table of key material, one row for each
:math:`t`-sized subset of :math:`n` and one column for each participant
:math:`[0,n)`. The values in each row xor to the same master key.
#. :math:`n` directories are created, each representing one share. Each share
gets its own identical copy of the encrypted archive, plus some metadata in a
json file. The metadata includes:
* A version.
* A common UUID identifying the kit as a whole.
* The index of that particular share.
* The HMAC value.
* The values of :math:`n` and :math:`t`.
* A table of key material.
The key material is essentially one column of the full key table: all of the
partial keys that belong to this share, associated with a subgroup. In other
words, it says "to combine shares 0, 1, and 2, use k1; or, to combine shares
0, 1, and 3, use k2; ...".
When :math:`t` shares are brought together, one row of the key table can be
fully reassembled, which means the master key can be recovered and the archive
decrypted.
Changes
-------
.. include:: ../../CHANGES
LICENSE
-------
.. include:: ../../LICENSE
| /salvage-0.1.3.tar.gz/salvage-0.1.3/docs/source/index.rst | 0.819533 | 0.800068 | index.rst | pypi |
from dataclasses import dataclass
from typing import List, Optional, Tuple
from salvia.types.blockchain_format.foliage import Foliage
from salvia.types.blockchain_format.reward_chain_block import RewardChainBlock, RewardChainBlockUnfinished
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from salvia.types.blockchain_format.vdf import VDFInfo, VDFProof
from salvia.types.end_of_slot_bundle import EndOfSubSlotBundle
from salvia.util.ints import uint8, uint32, uint64, uint128
from salvia.util.streamable import Streamable, streamable
"""
Protocol between timelord and full node.
Note: When changing this file, also change protocol_message_types.py, and the protocol version in shared_protocol.py
"""
@dataclass(frozen=True)
@streamable
class NewPeakTimelord(Streamable):
    """Notify the timelord that a new peak has been infused."""
    reward_chain_block: RewardChainBlock
    difficulty: uint64
    deficit: uint8
    sub_slot_iters: uint64  # SSi in the slot where NewPeak has been infused
    sub_epoch_summary: Optional[
        SubEpochSummary
    ]  # If NewPeak is the last slot in epoch, the next slot should include this
    previous_reward_challenges: List[Tuple[bytes32, uint128]]
    last_challenge_sb_or_eos_total_iters: uint128
    passes_ses_height_but_not_yet_included: bool


@dataclass(frozen=True)
@streamable
class NewUnfinishedBlockTimelord(Streamable):
    """Hand an unfinished block to the timelord."""
    reward_chain_block: RewardChainBlockUnfinished  # Reward chain trunk data
    difficulty: uint64
    sub_slot_iters: uint64  # SSi in the slot where block is infused
    foliage: Foliage  # Reward chain foliage data
    sub_epoch_summary: Optional[SubEpochSummary]  # If this is the last slot in epoch, the next slot should include this
    # This is the last thing infused in the reward chain before this signage point.
    # The challenge that the SP reward chain VDF is based off of, or in the case of sp index 0, the previous infusion
    rc_prev: bytes32


@dataclass(frozen=True)
@streamable
class NewInfusionPointVDF(Streamable):
    """VDF infos and proofs for the infusion point of an unfinished block."""
    unfinished_reward_hash: bytes32
    challenge_chain_ip_vdf: VDFInfo
    challenge_chain_ip_proof: VDFProof
    reward_chain_ip_vdf: VDFInfo
    reward_chain_ip_proof: VDFProof
    infused_challenge_chain_ip_vdf: Optional[VDFInfo]
    infused_challenge_chain_ip_proof: Optional[VDFProof]


@dataclass(frozen=True)
@streamable
class NewSignagePointVDF(Streamable):
    """VDF infos and proofs for a signage point."""
    index_from_challenge: uint8
    challenge_chain_sp_vdf: VDFInfo
    challenge_chain_sp_proof: VDFProof
    reward_chain_sp_vdf: VDFInfo
    reward_chain_sp_proof: VDFProof


@dataclass(frozen=True)
@streamable
class NewEndOfSubSlotVDF(Streamable):
    """End-of-sub-slot bundle produced at the end of a sub slot."""
    end_of_sub_slot_bundle: EndOfSubSlotBundle


@dataclass(frozen=True)
@streamable
class RequestCompactProofOfTime(Streamable):
    """Request a compact proof of time for the given VDF/field in a block."""
    new_proof_of_time: VDFInfo
    header_hash: bytes32
    height: uint32
    field_vdf: uint8
@dataclass(frozen=True)
@streamable
class RespondCompactProofOfTime(Streamable):
vdf_info: VDFInfo
vdf_proof: VDFProof
header_hash: bytes32
height: uint32
field_vdf: uint8 | /salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/protocols/timelord_protocol.py | 0.895466 | 0.362433 | timelord_protocol.py | pypi |
from dataclasses import dataclass
from typing import List, Optional
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.blockchain_format.vdf import VDFInfo, VDFProof
from salvia.types.end_of_slot_bundle import EndOfSubSlotBundle
from salvia.types.full_block import FullBlock
from salvia.types.peer_info import TimestampedPeerInfo
from salvia.types.spend_bundle import SpendBundle
from salvia.types.unfinished_block import UnfinishedBlock
from salvia.types.weight_proof import WeightProof
from salvia.util.ints import uint8, uint32, uint64, uint128
from salvia.util.streamable import Streamable, streamable
"""
Protocol between full nodes.
Note: When changing this file, also change protocol_message_types.py, and the protocol version in shared_protocol.py
"""
@dataclass(frozen=True)
@streamable
class NewPeak(Streamable):
    """Announce a new peak to a peer."""
    header_hash: bytes32
    height: uint32
    weight: uint128
    fork_point_with_previous_peak: uint32
    unfinished_reward_block_hash: bytes32


@dataclass(frozen=True)
@streamable
class NewTransaction(Streamable):
    """Announce a new mempool transaction by id, cost and fees."""
    transaction_id: bytes32
    cost: uint64
    fees: uint64


@dataclass(frozen=True)
@streamable
class RequestTransaction(Streamable):
    """Request a transaction by id."""
    transaction_id: bytes32


@dataclass(frozen=True)
@streamable
class RespondTransaction(Streamable):
    """Deliver a full transaction (spend bundle)."""
    transaction: SpendBundle


@dataclass(frozen=True)
@streamable
class RequestProofOfWeight(Streamable):
    """Request a weight proof up to the given tip."""
    total_number_of_blocks: uint32
    tip: bytes32


@dataclass(frozen=True)
@streamable
class RespondProofOfWeight(Streamable):
    """Deliver a weight proof for the given tip."""
    wp: WeightProof
    tip: bytes32


@dataclass(frozen=True)
@streamable
class RequestBlock(Streamable):
    """Request a single block by height."""
    height: uint32
    include_transaction_block: bool


@dataclass(frozen=True)
@streamable
class RejectBlock(Streamable):
    """Reject a RequestBlock for the given height."""
    height: uint32


@dataclass(frozen=True)
@streamable
class RequestBlocks(Streamable):
    """Request a range of blocks (heights inclusive)."""
    start_height: uint32
    end_height: uint32
    include_transaction_block: bool


@dataclass(frozen=True)
@streamable
class RespondBlocks(Streamable):
    """Deliver a range of full blocks."""
    start_height: uint32
    end_height: uint32
    blocks: List[FullBlock]


@dataclass(frozen=True)
@streamable
class RejectBlocks(Streamable):
    """Reject a RequestBlocks for the given range."""
    start_height: uint32
    end_height: uint32


@dataclass(frozen=True)
@streamable
class RespondBlock(Streamable):
    """Deliver a single full block."""
    block: FullBlock


@dataclass(frozen=True)
@streamable
class NewUnfinishedBlock(Streamable):
    """Announce an unfinished block by its reward hash."""
    unfinished_reward_hash: bytes32


@dataclass(frozen=True)
@streamable
class RequestUnfinishedBlock(Streamable):
    """Request an unfinished block by its reward hash."""
    unfinished_reward_hash: bytes32


@dataclass(frozen=True)
@streamable
class RespondUnfinishedBlock(Streamable):
    """Deliver an unfinished block."""
    unfinished_block: UnfinishedBlock


@dataclass(frozen=True)
@streamable
class NewSignagePointOrEndOfSubSlot(Streamable):
    """Announce a signage point or end-of-sub-slot."""
    prev_challenge_hash: Optional[bytes32]
    challenge_hash: bytes32
    index_from_challenge: uint8
    last_rc_infusion: bytes32


@dataclass(frozen=True)
@streamable
class RequestSignagePointOrEndOfSubSlot(Streamable):
    """Request a signage point or end-of-sub-slot."""
    challenge_hash: bytes32
    index_from_challenge: uint8
    last_rc_infusion: bytes32


@dataclass(frozen=True)
@streamable
class RespondSignagePoint(Streamable):
    """Deliver the VDFs and proofs of a signage point."""
    index_from_challenge: uint8
    challenge_chain_vdf: VDFInfo
    challenge_chain_proof: VDFProof
    reward_chain_vdf: VDFInfo
    reward_chain_proof: VDFProof


@dataclass(frozen=True)
@streamable
class RespondEndOfSubSlot(Streamable):
    """Deliver an end-of-sub-slot bundle."""
    end_of_slot_bundle: EndOfSubSlotBundle


@dataclass(frozen=True)
@streamable
class RequestMempoolTransactions(Streamable):
    """Request mempool transactions matching the given filter bytes."""
    filter: bytes


@dataclass(frozen=True)
@streamable
class NewCompactVDF(Streamable):
    """Announce availability of a compact VDF for a block field."""
    height: uint32
    header_hash: bytes32
    field_vdf: uint8
    vdf_info: VDFInfo


@dataclass(frozen=True)
@streamable
class RequestCompactVDF(Streamable):
    """Request a compact VDF for a block field."""
    height: uint32
    header_hash: bytes32
    field_vdf: uint8
    vdf_info: VDFInfo


@dataclass(frozen=True)
@streamable
class RespondCompactVDF(Streamable):
    """Deliver a compact VDF and its proof."""
    height: uint32
    header_hash: bytes32
    field_vdf: uint8
    vdf_info: VDFInfo
    vdf_proof: VDFProof


@dataclass(frozen=True)
@streamable
class RequestPeers(Streamable):
    """
    Return full list of peers
    """
@dataclass(frozen=True)
@streamable
class RespondPeers(Streamable):
peer_list: List[TimestampedPeerInfo] | /salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/protocols/full_node_protocol.py | 0.85292 | 0.376107 | full_node_protocol.py | pypi |
from dataclasses import dataclass
from typing import List, Optional, Tuple
from salvia.types.blockchain_format.coin import Coin
from salvia.types.blockchain_format.program import Program
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.header_block import HeaderBlock
from salvia.types.spend_bundle import SpendBundle
from salvia.util.ints import uint8, uint32, uint128
from salvia.util.streamable import Streamable, streamable
"""
Protocol between wallet (SPV node) and full node.
Note: When changing this file, also change protocol_message_types.py, and the protocol version in shared_protocol.py
"""
@dataclass(frozen=True)
@streamable
class RequestPuzzleSolution(Streamable):
    """Request the puzzle and solution for a coin spent at the given height."""
    coin_name: bytes32
    height: uint32


@dataclass(frozen=True)
@streamable
class PuzzleSolutionResponse(Streamable):
    """Puzzle and solution for a spent coin."""
    coin_name: bytes32
    height: uint32
    puzzle: Program
    solution: Program


@dataclass(frozen=True)
@streamable
class RespondPuzzleSolution(Streamable):
    """Deliver a puzzle/solution response."""
    response: PuzzleSolutionResponse


@dataclass(frozen=True)
@streamable
class RejectPuzzleSolution(Streamable):
    """Reject a RequestPuzzleSolution."""
    coin_name: bytes32
    height: uint32


@dataclass(frozen=True)
@streamable
class SendTransaction(Streamable):
    """Submit a spend bundle for mempool inclusion."""
    transaction: SpendBundle


@dataclass(frozen=True)
@streamable
class TransactionAck(Streamable):
    """Acknowledge a submitted transaction with its mempool status."""
    txid: bytes32
    status: uint8  # MempoolInclusionStatus
    error: Optional[str]


@dataclass(frozen=True)
@streamable
class NewPeakWallet(Streamable):
    """Announce a new peak to a wallet."""
    header_hash: bytes32
    height: uint32
    weight: uint128
    fork_point_with_previous_peak: uint32


@dataclass(frozen=True)
@streamable
class RequestBlockHeader(Streamable):
    """Request a block header by height."""
    height: uint32


@dataclass(frozen=True)
@streamable
class RespondBlockHeader(Streamable):
    """Deliver a header block."""
    header_block: HeaderBlock


@dataclass(frozen=True)
@streamable
class RejectHeaderRequest(Streamable):
    """Reject a RequestBlockHeader."""
    height: uint32


@dataclass(frozen=True)
@streamable
class RequestRemovals(Streamable):
    """Request coin removals in a block; None coin_names means all removals."""
    height: uint32
    header_hash: bytes32
    coin_names: Optional[List[bytes32]]


@dataclass(frozen=True)
@streamable
class RespondRemovals(Streamable):
    """Deliver removed coins, optionally with (membership) proofs."""
    height: uint32
    header_hash: bytes32
    coins: List[Tuple[bytes32, Optional[Coin]]]
    proofs: Optional[List[Tuple[bytes32, bytes]]]


@dataclass(frozen=True)
@streamable
class RejectRemovalsRequest(Streamable):
    """Reject a RequestRemovals."""
    height: uint32
    header_hash: bytes32


@dataclass(frozen=True)
@streamable
class RequestAdditions(Streamable):
    """Request coin additions in a block, optionally filtered by puzzle hash."""
    height: uint32
    header_hash: Optional[bytes32]
    puzzle_hashes: Optional[List[bytes32]]


@dataclass(frozen=True)
@streamable
class RespondAdditions(Streamable):
    """Deliver added coins grouped by puzzle hash, optionally with proofs."""
    height: uint32
    header_hash: bytes32
    coins: List[Tuple[bytes32, List[Coin]]]
    proofs: Optional[List[Tuple[bytes32, bytes, Optional[bytes]]]]


@dataclass(frozen=True)
@streamable
class RejectAdditionsRequest(Streamable):
    """Reject a RequestAdditions."""
    height: uint32
    header_hash: bytes32


@dataclass(frozen=True)
@streamable
class RequestHeaderBlocks(Streamable):
    """Request a range of header blocks (heights inclusive)."""
    start_height: uint32
    end_height: uint32


@dataclass(frozen=True)
@streamable
class RejectHeaderBlocks(Streamable):
    """Reject a RequestHeaderBlocks for the given range."""
    start_height: uint32
    end_height: uint32


@dataclass(frozen=True)
@streamable
class RespondHeaderBlocks(Streamable):
    """Deliver a range of header blocks."""
    start_height: uint32
    end_height: uint32
    header_blocks: List[HeaderBlock]


@dataclass(frozen=True)
@streamable
class CoinState(Streamable):
    """A coin together with the heights it was created and (maybe) spent at."""
    coin: Coin
    spent_height: Optional[uint32]
    created_height: Optional[uint32]


@dataclass(frozen=True)
@streamable
class RegisterForPhUpdates(Streamable):
    """Subscribe to state updates for the given puzzle hashes."""
    puzzle_hashes: List[bytes32]
    min_height: uint32


@dataclass(frozen=True)
@streamable
class RespondToPhUpdates(Streamable):
    """Initial coin states for a puzzle-hash subscription."""
    puzzle_hashes: List[bytes32]
    min_height: uint32
    coin_states: List[CoinState]


@dataclass(frozen=True)
@streamable
class RegisterForCoinUpdates(Streamable):
    """Subscribe to state updates for the given coin ids."""
    coin_ids: List[bytes32]
    min_height: uint32


@dataclass(frozen=True)
@streamable
class RespondToCoinUpdates(Streamable):
    """Initial coin states for a coin-id subscription."""
    coin_ids: List[bytes32]
    min_height: uint32
    coin_states: List[CoinState]


@dataclass(frozen=True)
@streamable
class CoinStateUpdate(Streamable):
    """Push updated coin states to a subscriber."""
    height: uint32
    fork_height: uint32
    peak_hash: bytes32
    items: List[CoinState]


@dataclass(frozen=True)
@streamable
class RequestChildren(Streamable):
    """Request the child coins of a coin."""
    coin_name: bytes32


@dataclass(frozen=True)
@streamable
class RespondChildren(Streamable):
    """Deliver the child coin states."""
    coin_states: List[CoinState]


@dataclass(frozen=True)
@streamable
class RequestSESInfo(Streamable):
    """Request sub-epoch-summary info for a height range."""
    start_height: uint32
    end_height: uint32
@dataclass(frozen=True)
@streamable
class RespondSESInfo(Streamable):
reward_chain_hash: List[bytes32]
heights: List[List[uint32]] | /salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/protocols/wallet_protocol.py | 0.875594 | 0.497376 | wallet_protocol.py | pypi |
from dataclasses import dataclass
from enum import Enum
import time
from typing import Optional
from blspy import G1Element, G2Element
from salvia.types.blockchain_format.proof_of_space import ProofOfSpace
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.util.ints import uint8, uint16, uint32, uint64
from salvia.util.streamable import Streamable, streamable
POOL_PROTOCOL_VERSION = uint8(1)
class PoolErrorCode(Enum):
    """Error codes returned by pool endpoints in an ErrorResponse."""
    REVERTED_SIGNAGE_POINT = 1
    TOO_LATE = 2
    NOT_FOUND = 3
    INVALID_PROOF = 4
    PROOF_NOT_GOOD_ENOUGH = 5
    INVALID_DIFFICULTY = 6
    INVALID_SIGNATURE = 7
    SERVER_EXCEPTION = 8
    INVALID_P2_SINGLETON_PUZZLE_HASH = 9
    FARMER_NOT_KNOWN = 10
    FARMER_ALREADY_KNOWN = 11
    INVALID_AUTHENTICATION_TOKEN = 12
    INVALID_PAYOUT_INSTRUCTIONS = 13
    INVALID_SINGLETON = 14
    DELAY_TIME_TOO_SHORT = 15
    REQUEST_FAILED = 16


# Used to verify GET /farmer and GET /login
@dataclass(frozen=True)
@streamable
class AuthenticationPayload(Streamable):
    """Payload signed by the farmer to authenticate GET requests."""
    method_name: str
    launcher_id: bytes32
    target_puzzle_hash: bytes32
    authentication_token: uint64


# GET /pool_info
@dataclass(frozen=True)
@streamable
class GetPoolInfoResponse(Streamable):
    """Static information describing the pool."""
    name: str
    logo_url: str
    minimum_difficulty: uint64
    relative_lock_height: uint32
    protocol_version: uint8
    fee: str
    description: str
    target_puzzle_hash: bytes32
    authentication_token_timeout: uint8


# POST /partial
@dataclass(frozen=True)
@streamable
class PostPartialPayload(Streamable):
    """Signed payload of a partial proof submission."""
    launcher_id: bytes32
    authentication_token: uint64
    proof_of_space: ProofOfSpace
    sp_hash: bytes32
    end_of_sub_slot: bool
    harvester_id: bytes32


@dataclass(frozen=True)
@streamable
class PostPartialRequest(Streamable):
    """Partial submission: payload plus aggregate signature."""
    payload: PostPartialPayload
    aggregate_signature: G2Element


# Response in success case
@dataclass(frozen=True)
@streamable
class PostPartialResponse(Streamable):
    """Successful partial submission response."""
    new_difficulty: uint64


# GET /farmer
# Response in success case
@dataclass(frozen=True)
@streamable
class GetFarmerResponse(Streamable):
    """Current farmer registration state known to the pool."""
    authentication_public_key: G1Element
    payout_instructions: str
    current_difficulty: uint64
    current_points: uint64


# POST /farmer
@dataclass(frozen=True)
@streamable
class PostFarmerPayload(Streamable):
    """Signed payload to register a farmer with the pool."""
    launcher_id: bytes32
    authentication_token: uint64
    authentication_public_key: G1Element
    payout_instructions: str
    suggested_difficulty: Optional[uint64]


@dataclass(frozen=True)
@streamable
class PostFarmerRequest(Streamable):
    """Farmer registration: payload plus signature."""
    payload: PostFarmerPayload
    signature: G2Element


# Response in success case
@dataclass(frozen=True)
@streamable
class PostFarmerResponse(Streamable):
    """Successful farmer registration response."""
    welcome_message: str


# PUT /farmer
@dataclass(frozen=True)
@streamable
class PutFarmerPayload(Streamable):
    """Signed payload to update farmer settings; None fields are unchanged."""
    launcher_id: bytes32
    authentication_token: uint64
    authentication_public_key: Optional[G1Element]
    payout_instructions: Optional[str]
    suggested_difficulty: Optional[uint64]


@dataclass(frozen=True)
@streamable
class PutFarmerRequest(Streamable):
    """Farmer update: payload plus signature."""
    payload: PutFarmerPayload
    signature: G2Element


# Response in success case
@dataclass(frozen=True)
@streamable
class PutFarmerResponse(Streamable):
    """Per-field flags indicating which settings were updated."""
    authentication_public_key: Optional[bool]
    payout_instructions: Optional[bool]
    suggested_difficulty: Optional[bool]


# Misc
# Response in error case for all endpoints of the pool protocol
@dataclass(frozen=True)
@streamable
class ErrorResponse(Streamable):
    """Error response shared by all pool endpoints; see PoolErrorCode."""
    error_code: uint16
    error_message: Optional[str]
# Get the current authentication token according to the "Farmer authentication" section of SPECIFICATION.md
def get_current_authentication_token(timeout: uint8) -> uint64:
    """Return the current authentication token: minutes since the Unix epoch,
    bucketed into windows of `timeout` minutes."""
    minutes_since_epoch = int(time.time() / 60)
    return uint64(minutes_since_epoch // timeout)
# Validate a given authentication token against our local time
def validate_authentication_token(token: uint64, timeout: uint8):
return abs(token - get_current_authentication_token(timeout)) <= timeout | /salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/protocols/pool_protocol.py | 0.818519 | 0.202897 | pool_protocol.py | pypi |
from enum import Enum
class ProtocolMessageTypes(Enum):
    """Wire identifiers for every message exchanged between salvia services.

    Values must stay stable across releases; new messages get new numbers.
    """
    # Shared protocol (all services)
    handshake = 1

    # Harvester protocol (harvester <-> farmer)
    harvester_handshake = 3
    # new_signage_point_harvester = 4 Changed to 66 in new protocol
    new_proof_of_space = 5
    request_signatures = 6
    respond_signatures = 7

    # Farmer protocol (farmer <-> full_node)
    new_signage_point = 8
    declare_proof_of_space = 9
    request_signed_values = 10
    signed_values = 11
    farming_info = 12

    # Timelord protocol (timelord <-> full_node)
    new_peak_timelord = 13
    new_unfinished_block_timelord = 14
    new_infusion_point_vdf = 15
    new_signage_point_vdf = 16
    new_end_of_sub_slot_vdf = 17
    request_compact_proof_of_time = 18
    respond_compact_proof_of_time = 19

    # Full node protocol (full_node <-> full_node)
    new_peak = 20
    new_transaction = 21
    request_transaction = 22
    respond_transaction = 23
    request_proof_of_weight = 24
    respond_proof_of_weight = 25
    request_block = 26
    respond_block = 27
    reject_block = 28
    request_blocks = 29
    respond_blocks = 30
    reject_blocks = 31
    new_unfinished_block = 32
    request_unfinished_block = 33
    respond_unfinished_block = 34
    new_signage_point_or_end_of_sub_slot = 35
    request_signage_point_or_end_of_sub_slot = 36
    respond_signage_point = 37
    respond_end_of_sub_slot = 38
    request_mempool_transactions = 39
    request_compact_vdf = 40
    respond_compact_vdf = 41
    new_compact_vdf = 42
    request_peers = 43
    respond_peers = 44

    # Wallet protocol (wallet <-> full_node)
    request_puzzle_solution = 45
    respond_puzzle_solution = 46
    reject_puzzle_solution = 47
    send_transaction = 48
    transaction_ack = 49
    new_peak_wallet = 50
    request_block_header = 51
    respond_block_header = 52
    reject_header_request = 53
    request_removals = 54
    respond_removals = 55
    reject_removals_request = 56
    request_additions = 57
    respond_additions = 58
    reject_additions_request = 59
    request_header_blocks = 60
    reject_header_blocks = 61
    respond_header_blocks = 62

    # Introducer protocol (introducer <-> full_node)
    request_peers_introducer = 63
    respond_peers_introducer = 64

    # Simulator protocol
    farm_new_block = 65

    # New harvester protocol
    new_signage_point_harvester = 66
    request_plots = 67
    respond_plots = 68

    # More wallet protocol
    coin_state_update = 69
    register_interest_in_puzzle_hash = 70
    respond_to_ph_update = 71
    register_interest_in_coin = 72
    respond_to_coin_update = 73
    request_children = 74
    respond_children = 75
    request_ses_hashes = 76
respond_ses_hashes = 77 | /salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/protocols/protocol_message_types.py | 0.50952 | 0.186132 | protocol_message_types.py | pypi |
from salvia.protocols.protocol_message_types import ProtocolMessageTypes as pmt, ProtocolMessageTypes
# Messages a full node may send without expecting a typed reply.
NO_REPLY_EXPECTED = [
    # full_node -> full_node messages
    pmt.new_peak,
    pmt.new_transaction,
    pmt.new_unfinished_block,
    pmt.new_signage_point_or_end_of_sub_slot,
    # Bug fix: request_mempool_transactions was listed twice; one entry kept.
    # (Membership semantics are unchanged since the list is only used via set().)
    pmt.request_mempool_transactions,
    pmt.new_compact_vdf,
]
"""
VAILD_REPLY_MESSAGE_MAP:
key: sent message type.
value: valid reply message types, from the view of the requester.
A state machine can be built from this message map.
"""
VAILD_REPLY_MESSAGE_MAP = {
# messages for all services
# pmt.handshake is handled in WSSalviaConnection.perform_handshake
# full_node -> full_node protocol messages
pmt.request_transaction: [pmt.respond_transaction],
pmt.request_proof_of_weight: [pmt.respond_proof_of_weight],
pmt.request_block: [pmt.respond_block, pmt.reject_block],
pmt.request_blocks: [pmt.respond_blocks, pmt.reject_blocks],
pmt.request_unfinished_block: [pmt.respond_unfinished_block],
pmt.request_signage_point_or_end_of_sub_slot: [pmt.respond_signage_point, pmt.respond_end_of_sub_slot],
pmt.request_compact_vdf: [pmt.respond_compact_vdf],
pmt.request_peers: [pmt.respond_peers],
}
def static_check_sent_message_response() -> None:
    """Check that allowed message data structures VALID_REPLY_MESSAGE_MAP and NO_REPLY_EXPECTED are consistent."""
    # Reply and non-reply sets should not overlap: This check should be static
    overlap = set(NO_REPLY_EXPECTED).intersection(set(VAILD_REPLY_MESSAGE_MAP.keys()))
    if len(overlap) != 0:
        # Bug fix: the offending values were never interpolated into the
        # message (the literal "{}" placeholder was raised verbatim).
        raise AssertionError(f"Overlapping NO_REPLY_EXPECTED and VAILD_REPLY_MESSAGE_MAP values: {overlap}")
def message_requires_reply(sent: ProtocolMessageTypes) -> bool:
    """Return True if message has an entry in the full node P2P message map"""
    # If we knew the peer NodeType is FULL_NODE, we could also check `sent not in NO_REPLY_EXPECTED`
    return VAILD_REPLY_MESSAGE_MAP.get(sent) is not None
def message_response_ok(sent: ProtocolMessageTypes, received: ProtocolMessageTypes) -> bool:
    """
    Check to see that peers respect protocol message types in reply.
    Call with received == None to indicate that we do not expect a specific reply message type.
    """
    # A False return means a runtime protocol message mismatch from a peer.
    allowed_replies = VAILD_REPLY_MESSAGE_MAP.get(sent)
    if allowed_replies is None:
        # Messages with no map entry place no constraint on the reply.
        return True
    return received in allowed_replies
# Run `static_check_sent_message_response` to check this static invariant at import time
static_check_sent_message_response() | /salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/protocols/protocol_state_machine.py | 0.704567 | 0.36328 | protocol_state_machine.py | pypi |
from dataclasses import dataclass
from typing import List, Tuple, Optional
from blspy import G1Element, G2Element
from salvia.types.blockchain_format.proof_of_space import ProofOfSpace
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.util.ints import uint8, uint64
from salvia.util.streamable import Streamable, streamable
"""
Protocol between harvester and farmer.
Note: When changing this file, also change protocol_message_types.py, and the protocol version in shared_protocol.py
"""
@dataclass(frozen=True)
@streamable
class PoolDifficulty(Streamable):
    """Per-pool difficulty override for partials."""
    difficulty: uint64
    sub_slot_iters: uint64
    pool_contract_puzzle_hash: bytes32


@dataclass(frozen=True)
@streamable
class HarvesterHandshake(Streamable):
    """Keys the harvester should use to filter and sign plots."""
    farmer_public_keys: List[G1Element]
    pool_public_keys: List[G1Element]


@dataclass(frozen=True)
@streamable
class NewSignagePointHarvester(Streamable):
    """Notify the harvester of a new signage point to look up proofs for."""
    challenge_hash: bytes32
    difficulty: uint64
    sub_slot_iters: uint64
    signage_point_index: uint8
    sp_hash: bytes32
    pool_difficulties: List[PoolDifficulty]


@dataclass(frozen=True)
@streamable
class NewProofOfSpace(Streamable):
    """A proof of space found for a signage point."""
    challenge_hash: bytes32
    sp_hash: bytes32
    plot_identifier: str
    proof: ProofOfSpace
    signage_point_index: uint8


@dataclass(frozen=True)
@streamable
class RequestSignatures(Streamable):
    """Request signatures over the given messages for a plot."""
    plot_identifier: str
    challenge_hash: bytes32
    sp_hash: bytes32
    messages: List[bytes32]


@dataclass(frozen=True)
@streamable
class RespondSignatures(Streamable):
    """Signatures over the requested messages, with the signing keys."""
    plot_identifier: str
    challenge_hash: bytes32
    sp_hash: bytes32
    local_pk: G1Element
    farmer_pk: G1Element
    message_signatures: List[Tuple[bytes32, G2Element]]


@dataclass(frozen=True)
@streamable
class Plot(Streamable):
    """Metadata describing a single plot file."""
    filename: str
    size: uint8
    plot_id: bytes32
    pool_public_key: Optional[G1Element]
    pool_contract_puzzle_hash: Optional[bytes32]
    plot_public_key: G1Element
    file_size: uint64
    time_modified: uint64


@dataclass(frozen=True)
@streamable
class RequestPlots(Streamable):
    """Request the harvester's plot list."""
    pass
@dataclass(frozen=True)
@streamable
class RespondPlots(Streamable):
plots: List[Plot]
failed_to_open_filenames: List[str]
no_key_filenames: List[str] | /salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/protocols/harvester_protocol.py | 0.878978 | 0.408778 | harvester_protocol.py | pypi |
import logging
from typing import Dict, List, Tuple
import aiosqlite
from salvia.server.address_manager import (
BUCKET_SIZE,
NEW_BUCKET_COUNT,
NEW_BUCKETS_PER_ADDRESS,
AddressManager,
ExtendedPeerInfo,
)
log = logging.getLogger(__name__)
class AddressManagerStore:
    """
    SQLite-backed persistence for an AddressManager.

    Metadata table:
    - private key
    - new table count
    - tried table count
    Nodes table:
    * Maps entries from new/tried table to unique node ids.
    - node_id
    - IP, port, together with the IP, port of the source peer.
    New table:
    * Stores node_id, bucket for each occurrence in the new table of an entry.
    * Once we know the buckets, we can also deduce the bucket positions.
    Every other piece of information, such as tried_matrix, map_addr, map_info, random_pos,
    can be deduced and is not explicitly stored; instead it is recalculated.
    """

    db: aiosqlite.Connection

    @classmethod
    async def create(cls, connection) -> "AddressManagerStore":
        """Wrap `connection` and make sure all three backing tables exist."""
        self = cls()
        self.db = connection
        await self.db.commit()
        await self.db.execute("CREATE TABLE IF NOT EXISTS peer_metadata(key text,value text)")
        await self.db.commit()
        await self.db.execute("CREATE TABLE IF NOT EXISTS peer_nodes(node_id int,value text)")
        await self.db.commit()
        await self.db.execute("CREATE TABLE IF NOT EXISTS peer_new_table(node_id int,bucket int)")
        await self.db.commit()
        return self

    async def clear(self) -> None:
        """Delete every row from all three peer tables."""
        cursor = await self.db.execute("DELETE from peer_metadata")
        await cursor.close()
        cursor = await self.db.execute("DELETE from peer_nodes")
        await cursor.close()
        cursor = await self.db.execute("DELETE from peer_new_table")
        await cursor.close()
        await self.db.commit()

    async def get_metadata(self) -> Dict[str, str]:
        """Return the metadata table as a key -> value dict."""
        cursor = await self.db.execute("SELECT key, value from peer_metadata")
        metadata = await cursor.fetchall()
        await cursor.close()
        return {key: value for key, value in metadata}

    async def is_empty(self) -> bool:
        """True when no serialized address-manager state is present."""
        metadata = await self.get_metadata()
        if "key" not in metadata:
            return True
        if int(metadata.get("new_count", 0)) > 0:
            return False
        if int(metadata.get("tried_count", 0)) > 0:
            return False
        return True

    async def get_nodes(self) -> List[Tuple[int, ExtendedPeerInfo]]:
        """Return all stored nodes as (node_id, ExtendedPeerInfo) pairs."""
        cursor = await self.db.execute("SELECT node_id, value from peer_nodes")
        nodes_id = await cursor.fetchall()
        await cursor.close()
        return [(node_id, ExtendedPeerInfo.from_string(info_str)) for node_id, info_str in nodes_id]

    async def get_new_table(self) -> List[Tuple[int, int]]:
        """Return all (node_id, bucket) rows of the new table."""
        cursor = await self.db.execute("SELECT node_id, bucket from peer_new_table")
        entries = await cursor.fetchall()
        await cursor.close()
        return [(node_id, bucket) for node_id, bucket in entries]

    async def set_metadata(self, metadata) -> None:
        """Upsert an iterable of (key, value) pairs into the metadata table."""
        for key, value in metadata:
            cursor = await self.db.execute(
                "INSERT OR REPLACE INTO peer_metadata VALUES(?, ?)",
                (key, value),
            )
            await cursor.close()
        await self.db.commit()

    async def set_nodes(self, node_list) -> None:
        """Upsert an iterable of (node_id, ExtendedPeerInfo) pairs into the nodes table."""
        for node_id, peer_info in node_list:
            cursor = await self.db.execute(
                "INSERT OR REPLACE INTO peer_nodes VALUES(?, ?)",
                (node_id, peer_info.to_string()),
            )
            await cursor.close()
        await self.db.commit()

    async def set_new_table(self, entries) -> None:
        """Upsert an iterable of (node_id, bucket) pairs into the new table."""
        for node_id, bucket in entries:
            cursor = await self.db.execute(
                "INSERT OR REPLACE INTO peer_new_table VALUES(?, ?)",
                (node_id, bucket),
            )
            await cursor.close()
        await self.db.commit()

    async def serialize(self, address_manager: AddressManager):
        """Persist the in-memory address manager: metadata, nodes, and new-table buckets."""
        metadata = []
        nodes = []
        new_table_entries = []
        metadata.append(("key", str(address_manager.key)))

        # Re-number nodes densely: referenced ("new") entries first, tried entries after.
        unique_ids = {}
        count_ids = 0
        for node_id, info in address_manager.map_info.items():
            unique_ids[node_id] = count_ids
            if info.ref_count > 0:
                assert count_ids != address_manager.new_count
                nodes.append((count_ids, info))
                count_ids += 1
        metadata.append(("new_count", str(count_ids)))

        tried_ids = 0
        for node_id, info in address_manager.map_info.items():
            if info.is_tried:
                assert info is not None
                assert tried_ids != address_manager.tried_count
                nodes.append((count_ids, info))
                count_ids += 1
                tried_ids += 1
        metadata.append(("tried_count", str(tried_ids)))

        # Record which renumbered node occupies each new-table bucket.
        for bucket in range(NEW_BUCKET_COUNT):
            for i in range(BUCKET_SIZE):
                if address_manager.new_matrix[bucket][i] != -1:
                    index = unique_ids[address_manager.new_matrix[bucket][i]]
                    new_table_entries.append((index, bucket))

        await self.clear()
        await self.set_metadata(metadata)
        await self.set_nodes(nodes)
        await self.set_new_table(new_table_entries)

    async def deserialize(self) -> AddressManager:
        """Rebuild an AddressManager from the stored tables."""
        address_manager = AddressManager()
        metadata = await self.get_metadata()
        nodes = await self.get_nodes()
        new_table_entries = await self.get_new_table()
        address_manager.clear()

        address_manager.key = int(metadata["key"])
        address_manager.new_count = int(metadata["new_count"])
        # address_manager.tried_count = int(metadata["tried_count"])
        # The tried count is recomputed below while re-inserting tried nodes.
        address_manager.tried_count = 0

        # Node ids below new_count belong to the new table (see serialize()).
        new_table_nodes = [(node_id, info) for node_id, info in nodes if node_id < address_manager.new_count]
        for n, info in new_table_nodes:
            address_manager.map_addr[info.peer_info.host] = n
            address_manager.map_info[n] = info
            info.random_pos = len(address_manager.random_pos)
            address_manager.random_pos.append(n)
        address_manager.id_count = len(new_table_nodes)
        tried_table_nodes = [(node_id, info) for node_id, info in nodes if node_id >= address_manager.new_count]
        # lost_count = 0
        for node_id, info in tried_table_nodes:
            # Tried bucket/position are deterministic given the key, so they are
            # recomputed instead of stored; collisions are silently dropped.
            tried_bucket = info.get_tried_bucket(address_manager.key)
            tried_bucket_pos = info.get_bucket_position(address_manager.key, False, tried_bucket)
            if address_manager.tried_matrix[tried_bucket][tried_bucket_pos] == -1:
                info.random_pos = len(address_manager.random_pos)
                info.is_tried = True
                id_count = address_manager.id_count
                address_manager.random_pos.append(id_count)
                address_manager.map_info[id_count] = info
                address_manager.map_addr[info.peer_info.host] = id_count
                address_manager.tried_matrix[tried_bucket][tried_bucket_pos] = id_count
                address_manager.id_count += 1
                address_manager.tried_count += 1
            # else:
            #     lost_count += 1

        # address_manager.tried_count -= lost_count
        for node_id, bucket in new_table_entries:
            if node_id >= 0 and node_id < address_manager.new_count:
                info = address_manager.map_info[node_id]
                bucket_pos = info.get_bucket_position(address_manager.key, True, bucket)
                if address_manager.new_matrix[bucket][bucket_pos] == -1 and info.ref_count < NEW_BUCKETS_PER_ADDRESS:
                    info.ref_count += 1
                    address_manager.new_matrix[bucket][bucket_pos] = node_id

        # Drop nodes that ended up referenced by neither table.
        for node_id, info in list(address_manager.map_info.items()):
            if not info.is_tried and info.ref_count == 0:
                address_manager.delete_new_entry_(node_id)
        address_manager.load_used_table_positions()
        return address_manager
import random
import time
from typing import Set, List, Optional
from dataclasses import dataclass
from salvia.types.peer_info import PeerInfo
from salvia.util.ints import uint64, uint16
@dataclass(frozen=False)
class VettedPeer:
    """A candidate full-node peer together with its vetting bookkeeping."""

    host: str
    port: uint16
    # 0 means we have not attempted to vet this peer yet
    # a negative number means we have failed that many vetting attempts in a row
    # a positive number means we have successfully vetted the peer this many
    # times in a row
    vetted: int = 0
    # the timestamp of the last *successful* vetting of this peer
    vetted_timestamp: uint64 = uint64(0)
    # the last time we attempted to vet this peer, or 0 if we haven't tried yet
    # we set this regardless of whether the vetting is successful or not
    last_attempt: uint64 = uint64(0)
    time_added: uint64 = uint64(0)

    # NOTE: defining __init__ by hand suppresses the dataclass-generated one, so
    # only host/port are set here; the other fields keep their class-level
    # defaults until assigned (e.g. time_added is set by IntroducerPeers.add).
    def __init__(self, h: str, p: uint16):
        self.host = h
        self.port = p

    # Identity is (host, port) only; vetting state does not affect equality/hash.
    def __eq__(self, rhs):
        return self.host == rhs.host and self.port == rhs.port

    def __hash__(self):
        return hash((self.host, self.port))
class IntroducerPeers:
    """
    Has the list of known full node peers that are already connected or may be
    connected to, and the time that they were last added.
    """

    def __init__(self) -> None:
        self._peers: Set["VettedPeer"] = set()

    def add(self, peer: Optional["PeerInfo"]) -> bool:
        """Add `peer` to the set (stamping time_added); False for a missing peer/port."""
        if peer is None or not peer.port:
            return False
        p = VettedPeer(peer.host, peer.port)
        p.time_added = uint64(int(time.time()))
        if p in self._peers:
            return True
        self._peers.add(p)
        return True

    def remove(self, peer: Optional["VettedPeer"]) -> bool:
        """Remove `peer` from the set; False when absent or invalid."""
        if peer is None or not peer.port:
            return False
        try:
            self._peers.remove(peer)
            return True
        except KeyError:
            # BUG FIX: set.remove raises KeyError (not ValueError) for a missing
            # element; catching ValueError let the KeyError escape to the caller.
            return False

    def get_peers(self, max_peers: int = 0, randomize: bool = False, recent_threshold=9999999) -> List["VettedPeer"]:
        """Return up to `max_peers` peers added within `recent_threshold` seconds (0 = all)."""
        target_peers = [peer for peer in self._peers if time.time() - peer.time_added < recent_threshold]
        if not max_peers or max_peers > len(target_peers):
            max_peers = len(target_peers)
        if randomize:
            return random.sample(target_peers, max_peers)
        else:
            return target_peers[:max_peers]
import asyncio
import random
from typing import Any, List, Optional, Tuple
from salvia.server.ws_connection import WSSalviaConnection
async def send_all_first_reply(
    func: str, arg: Any, peers: List["WSSalviaConnection"], timeout=15
) -> Optional[Tuple[Any, "WSSalviaConnection"]]:
    """performs an API request to peers and returns the result of the first response and the peer that sent it."""

    async def do_func(peer_x: "WSSalviaConnection", func_x: str, arg_x: Any):
        # Invoke the named API method on the peer. A None reply is held open
        # for `timeout` seconds so a slower peer with a real answer can win.
        method_to_call = getattr(peer_x, func_x)
        result_x = await method_to_call(arg_x)
        if result_x is not None:
            return result_x, peer_x
        else:
            await asyncio.sleep(timeout)
            return None

    # BUG FIX: asyncio.wait() requires Task objects — passing bare coroutines
    # was deprecated in 3.8 and raises TypeError on Python 3.11+.
    tasks = [asyncio.create_task(do_func(peer, func, arg)) for peer in peers]
    done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
    # Cancel the losing requests so they don't linger after we return.
    for task in pending:
        task.cancel()
    if len(done) > 0:
        d = done.pop()
        result = d.result()
        if result is None:
            return None
        response, peer = result
        return response, peer
    else:
        return None
async def send_to_random(func: str, arg: Any, peers: List["WSSalviaConnection"]) -> Optional[Tuple[Any, "WSSalviaConnection"]]:
    """performs an API request to one randomly chosen peer and returns its result and the peer."""

    async def do_func(peer_x: "WSSalviaConnection", func_x: str, arg_x: Any):
        # Invoke the named API method; a None reply is padded with a 15s sleep
        # (mirrors send_all_first_reply's treatment of empty responses).
        method_to_call = getattr(peer_x, func_x)
        result_x = await method_to_call(arg_x)
        if result_x is not None:
            return result_x, peer_x
        else:
            await asyncio.sleep(15)
            return None

    random_peer = random.choice(peers)
    # BUG FIX: asyncio.wait() requires Task objects — passing bare coroutines
    # was deprecated in 3.8 and raises TypeError on Python 3.11+.
    tasks = [asyncio.create_task(do_func(random_peer, func, arg))]
    done, pending = await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
    if len(done) > 0:
        d = done.pop()
        result = d.result()
        if result is None:
            return None
        response, peer = result
        return response, peer
    else:
        return None
import json
import time
from typing import Callable, Optional, List, Any, Dict, Tuple
import aiohttp
from blspy import AugSchemeMPL, G2Element, PrivateKey
import salvia.server.ws_connection as ws
from salvia import __version__
from salvia.consensus.network_type import NetworkType
from salvia.consensus.pot_iterations import calculate_iterations_quality, calculate_sp_interval_iters
from salvia.farmer.farmer import Farmer
from salvia.protocols import farmer_protocol, harvester_protocol
from salvia.protocols.harvester_protocol import PoolDifficulty
from salvia.protocols.pool_protocol import (
get_current_authentication_token,
PoolErrorCode,
PostPartialRequest,
PostPartialPayload,
)
from salvia.protocols.protocol_message_types import ProtocolMessageTypes
from salvia.server.outbound_message import NodeType, make_msg
from salvia.server.server import ssl_context_for_root
from salvia.ssl.create_ssl import get_mozilla_ca_crt
from salvia.types.blockchain_format.pool_target import PoolTarget
from salvia.types.blockchain_format.proof_of_space import ProofOfSpace
from salvia.util.api_decorators import api_request, peer_required
from salvia.util.ints import uint32, uint64
def strip_old_entries(pairs: List[Tuple[float, Any]], before: float) -> List[Tuple[float, Any]]:
    """Drop the leading entries whose timestamp is older than `before`.

    Entries are assumed to be in ascending timestamp order: everything from the
    first sufficiently-recent entry onward is kept. Returns [] when every entry
    is stale (or `pairs` is empty).
    """
    for idx, (timestamp, _points) in enumerate(pairs):
        if timestamp >= before:
            # Avoid building a new list when nothing needs to be dropped.
            return pairs if idx == 0 else pairs[idx:]
    return []
class FarmerAPI:
    """Message handlers for everything the farmer receives from harvesters and full nodes."""

    farmer: Farmer

    def __init__(self, farmer) -> None:
        self.farmer = farmer

    def _set_state_changed_callback(self, callback: Callable):
        # Forward state-change notifications (GUI/RPC) through the farmer instance.
        self.farmer.state_changed_callback = callback

    @api_request
    @peer_required
    async def new_proof_of_space(
        self, new_proof_of_space: harvester_protocol.NewProofOfSpace, peer: ws.WSSalviaConnection
    ):
        """
        This is a response from the harvester, for a NewChallenge. Here we check if the proof
        of space is sufficiently good, and if so, we ask for the whole proof.
        """
        if new_proof_of_space.sp_hash not in self.farmer.number_of_responses:
            self.farmer.number_of_responses[new_proof_of_space.sp_hash] = 0
            self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))

        max_pos_per_sp = 5

        if self.farmer.constants.NETWORK_TYPE != NetworkType.MAINNET:
            # This is meant to make testnets more stable, when difficulty is very low
            if self.farmer.number_of_responses[new_proof_of_space.sp_hash] > max_pos_per_sp:
                self.farmer.log.info(
                    f"Surpassed {max_pos_per_sp} PoSpace for one SP, no longer submitting PoSpace for signage point "
                    f"{new_proof_of_space.sp_hash}"
                )
                return None

        if new_proof_of_space.sp_hash not in self.farmer.sps:
            self.farmer.log.warning(
                f"Received response for a signage point that we do not have {new_proof_of_space.sp_hash}"
            )
            return None

        sps = self.farmer.sps[new_proof_of_space.sp_hash]
        for sp in sps:
            computed_quality_string = new_proof_of_space.proof.verify_and_get_quality_string(
                self.farmer.constants,
                new_proof_of_space.challenge_hash,
                new_proof_of_space.sp_hash,
            )
            if computed_quality_string is None:
                self.farmer.log.error(f"Invalid proof of space {new_proof_of_space.proof}")
                return None

            self.farmer.number_of_responses[new_proof_of_space.sp_hash] += 1

            required_iters: uint64 = calculate_iterations_quality(
                self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
                computed_quality_string,
                new_proof_of_space.proof.size,
                sp.difficulty,
                new_proof_of_space.sp_hash,
            )

            # If the iters are good enough to make a block, proceed with the block making flow
            if required_iters < calculate_sp_interval_iters(self.farmer.constants, sp.sub_slot_iters):
                # Proceed at getting the signatures for this PoSpace
                request = harvester_protocol.RequestSignatures(
                    new_proof_of_space.plot_identifier,
                    new_proof_of_space.challenge_hash,
                    new_proof_of_space.sp_hash,
                    [sp.challenge_chain_sp, sp.reward_chain_sp],
                )

                if new_proof_of_space.sp_hash not in self.farmer.proofs_of_space:
                    self.farmer.proofs_of_space[new_proof_of_space.sp_hash] = []
                self.farmer.proofs_of_space[new_proof_of_space.sp_hash].append(
                    (
                        new_proof_of_space.plot_identifier,
                        new_proof_of_space.proof,
                    )
                )
                self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))
                # Remember where this quality string came from so request_signed_values
                # can route the follow-up signing request back to the same harvester.
                self.farmer.quality_str_to_identifiers[computed_quality_string] = (
                    new_proof_of_space.plot_identifier,
                    new_proof_of_space.challenge_hash,
                    new_proof_of_space.sp_hash,
                    peer.peer_node_id,
                )
                self.farmer.cache_add_time[computed_quality_string] = uint64(int(time.time()))

                await peer.send_message(make_msg(ProtocolMessageTypes.request_signatures, request))

            p2_singleton_puzzle_hash = new_proof_of_space.proof.pool_contract_puzzle_hash
            if p2_singleton_puzzle_hash is not None:
                # Otherwise, send the proof of space to the pool
                # When we win a block, we also send the partial to the pool
                if p2_singleton_puzzle_hash not in self.farmer.pool_state:
                    self.farmer.log.info(f"Did not find pool info for {p2_singleton_puzzle_hash}")
                    return
                pool_state_dict: Dict = self.farmer.pool_state[p2_singleton_puzzle_hash]
                pool_url = pool_state_dict["pool_config"].pool_url
                if pool_url == "":
                    return

                if pool_state_dict["current_difficulty"] is None:
                    self.farmer.log.warning(
                        f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
                        f"check communication with the pool, skipping this partial to {pool_url}."
                    )
                    return

                required_iters = calculate_iterations_quality(
                    self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
                    computed_quality_string,
                    new_proof_of_space.proof.size,
                    pool_state_dict["current_difficulty"],
                    new_proof_of_space.sp_hash,
                )
                if required_iters >= calculate_sp_interval_iters(
                    self.farmer.constants, self.farmer.constants.POOL_SUB_SLOT_ITERS
                ):
                    self.farmer.log.info(
                        f"Proof of space not good enough for pool {pool_url}: {pool_state_dict['current_difficulty']}"
                    )
                    return

                authentication_token_timeout = pool_state_dict["authentication_token_timeout"]
                if authentication_token_timeout is None:
                    self.farmer.log.warning(
                        f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
                        f", check communication with the pool."
                    )
                    return

                # Submit partial to pool
                is_eos = new_proof_of_space.signage_point_index == 0

                payload = PostPartialPayload(
                    pool_state_dict["pool_config"].launcher_id,
                    get_current_authentication_token(authentication_token_timeout),
                    new_proof_of_space.proof,
                    new_proof_of_space.sp_hash,
                    is_eos,
                    peer.peer_node_id,
                )

                # The plot key is 2/2 so we need the harvester's half of the signature
                m_to_sign = payload.get_hash()
                request = harvester_protocol.RequestSignatures(
                    new_proof_of_space.plot_identifier,
                    new_proof_of_space.challenge_hash,
                    new_proof_of_space.sp_hash,
                    [m_to_sign],
                )
                response: Any = await peer.request_signatures(request)
                if not isinstance(response, harvester_protocol.RespondSignatures):
                    self.farmer.log.error(f"Invalid response from harvester: {response}")
                    return

                assert len(response.message_signatures) == 1

                plot_signature: Optional[G2Element] = None
                for sk in self.farmer.get_private_keys():
                    pk = sk.get_g1()
                    if pk == response.farmer_pk:
                        agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, True)
                        assert agg_pk == new_proof_of_space.proof.plot_public_key
                        sig_farmer = AugSchemeMPL.sign(sk, m_to_sign, agg_pk)
                        taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
                        taproot_sig: G2Element = AugSchemeMPL.sign(taproot_sk, m_to_sign, agg_pk)
                        plot_signature = AugSchemeMPL.aggregate(
                            [sig_farmer, response.message_signatures[0][1], taproot_sig]
                        )
                        assert AugSchemeMPL.verify(agg_pk, m_to_sign, plot_signature)
                authentication_pk = pool_state_dict["pool_config"].authentication_public_key
                if authentication_pk is None:
                    # BUG FIX: previously `bytes(authentication_pk) is None`, which can never
                    # be True (and raises TypeError when the key actually is None).
                    self.farmer.log.error(f"No authentication sk for {authentication_pk}")
                    return
                # NOTE(review): raises KeyError if the key is not in authentication_keys —
                # presumably guaranteed by pool-config loading; confirm upstream.
                authentication_sk: PrivateKey = self.farmer.authentication_keys[bytes(authentication_pk)]
                authentication_signature = AugSchemeMPL.sign(authentication_sk, m_to_sign)

                assert plot_signature is not None
                agg_sig: G2Element = AugSchemeMPL.aggregate([plot_signature, authentication_signature])

                post_partial_request: PostPartialRequest = PostPartialRequest(payload, agg_sig)
                self.farmer.log.info(
                    f"Submitting partial for {post_partial_request.payload.launcher_id.hex()} to {pool_url}"
                )
                pool_state_dict["points_found_since_start"] += pool_state_dict["current_difficulty"]
                pool_state_dict["points_found_24h"].append((time.time(), pool_state_dict["current_difficulty"]))
                try:
                    async with aiohttp.ClientSession() as session:
                        async with session.post(
                            f"{pool_url}/partial",
                            json=post_partial_request.to_json_dict(),
                            ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.farmer.log),
                            headers={"User-Agent": f"Salvia Blockchain v.{__version__}"},
                        ) as resp:
                            if resp.ok:
                                pool_response: Dict = json.loads(await resp.text())
                                self.farmer.log.info(f"Pool response: {pool_response}")
                                if "error_code" in pool_response:
                                    self.farmer.log.error(
                                        f"Error in pooling: "
                                        f"{pool_response['error_code'], pool_response['error_message']}"
                                    )
                                    pool_state_dict["pool_errors_24h"].append(pool_response)
                                    if pool_response["error_code"] == PoolErrorCode.PROOF_NOT_GOOD_ENOUGH.value:
                                        self.farmer.log.error(
                                            "Partial not good enough, forcing pool farmer update to "
                                            "get our current difficulty."
                                        )
                                        pool_state_dict["next_farmer_update"] = 0
                                        await self.farmer.update_pool_state()
                                else:
                                    new_difficulty = pool_response["new_difficulty"]
                                    pool_state_dict["points_acknowledged_since_start"] += new_difficulty
                                    pool_state_dict["points_acknowledged_24h"].append((time.time(), new_difficulty))
                                    pool_state_dict["current_difficulty"] = new_difficulty
                            else:
                                self.farmer.log.error(f"Error sending partial to {pool_url}, {resp.status}")
                except Exception as e:
                    self.farmer.log.error(f"Error connecting to pool: {e}")
                    return

                return
        return

    @api_request
    async def respond_signatures(self, response: harvester_protocol.RespondSignatures):
        """
        There are two cases: receiving signatures for sps, or receiving signatures for the block.
        """
        if response.sp_hash not in self.farmer.sps:
            self.farmer.log.warning(f"Do not have challenge hash {response.challenge_hash}")
            return None
        is_sp_signatures: bool = False
        sps = self.farmer.sps[response.sp_hash]
        signage_point_index = sps[0].signage_point_index
        found_sp_hash_debug = False
        for sp_candidate in sps:
            if response.sp_hash == response.message_signatures[0][0]:
                found_sp_hash_debug = True
                if sp_candidate.reward_chain_sp == response.message_signatures[1][0]:
                    is_sp_signatures = True
        if found_sp_hash_debug:
            assert is_sp_signatures

        pospace = None
        for plot_identifier, candidate_pospace in self.farmer.proofs_of_space[response.sp_hash]:
            if plot_identifier == response.plot_identifier:
                pospace = candidate_pospace
        assert pospace is not None
        include_taproot: bool = pospace.pool_contract_puzzle_hash is not None

        computed_quality_string = pospace.verify_and_get_quality_string(
            self.farmer.constants, response.challenge_hash, response.sp_hash
        )
        if computed_quality_string is None:
            self.farmer.log.warning(f"Have invalid PoSpace {pospace}")
            return None

        if is_sp_signatures:
            (
                challenge_chain_sp,
                challenge_chain_sp_harv_sig,
            ) = response.message_signatures[0]
            reward_chain_sp, reward_chain_sp_harv_sig = response.message_signatures[1]
            for sk in self.farmer.get_private_keys():
                pk = sk.get_g1()
                if pk == response.farmer_pk:
                    agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, include_taproot)
                    assert agg_pk == pospace.plot_public_key
                    if include_taproot:
                        taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
                        taproot_share_cc_sp: G2Element = AugSchemeMPL.sign(taproot_sk, challenge_chain_sp, agg_pk)
                        taproot_share_rc_sp: G2Element = AugSchemeMPL.sign(taproot_sk, reward_chain_sp, agg_pk)
                    else:
                        taproot_share_cc_sp = G2Element()
                        taproot_share_rc_sp = G2Element()
                    farmer_share_cc_sp = AugSchemeMPL.sign(sk, challenge_chain_sp, agg_pk)
                    agg_sig_cc_sp = AugSchemeMPL.aggregate(
                        [challenge_chain_sp_harv_sig, farmer_share_cc_sp, taproot_share_cc_sp]
                    )
                    assert AugSchemeMPL.verify(agg_pk, challenge_chain_sp, agg_sig_cc_sp)

                    # This means it passes the sp filter
                    farmer_share_rc_sp = AugSchemeMPL.sign(sk, reward_chain_sp, agg_pk)
                    agg_sig_rc_sp = AugSchemeMPL.aggregate(
                        [reward_chain_sp_harv_sig, farmer_share_rc_sp, taproot_share_rc_sp]
                    )
                    assert AugSchemeMPL.verify(agg_pk, reward_chain_sp, agg_sig_rc_sp)

                    if pospace.pool_public_key is not None:
                        assert pospace.pool_contract_puzzle_hash is None
                        pool_pk = bytes(pospace.pool_public_key)
                        if pool_pk not in self.farmer.pool_sks_map:
                            self.farmer.log.error(
                                f"Don't have the private key for the pool key used by harvester: {pool_pk.hex()}"
                            )
                            return None
                        pool_target: Optional[PoolTarget] = PoolTarget(self.farmer.pool_target, uint32(0))
                        assert pool_target is not None
                        pool_target_signature: Optional[G2Element] = AugSchemeMPL.sign(
                            self.farmer.pool_sks_map[pool_pk], bytes(pool_target)
                        )
                    else:
                        assert pospace.pool_contract_puzzle_hash is not None
                        pool_target = None
                        pool_target_signature = None

                    request = farmer_protocol.DeclareProofOfSpace(
                        response.challenge_hash,
                        challenge_chain_sp,
                        signage_point_index,
                        reward_chain_sp,
                        pospace,
                        agg_sig_cc_sp,
                        agg_sig_rc_sp,
                        self.farmer.farmer_target,
                        pool_target,
                        pool_target_signature,
                    )
                    self.farmer.state_changed("proof", {"proof": request, "passed_filter": True})
                    msg = make_msg(ProtocolMessageTypes.declare_proof_of_space, request)
                    await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
                    return None
        else:
            # This is a response with block signatures
            for sk in self.farmer.get_private_keys():
                (
                    foliage_block_data_hash,
                    foliage_sig_harvester,
                ) = response.message_signatures[0]
                (
                    foliage_transaction_block_hash,
                    foliage_transaction_block_sig_harvester,
                ) = response.message_signatures[1]
                pk = sk.get_g1()
                if pk == response.farmer_pk:
                    agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, include_taproot)
                    assert agg_pk == pospace.plot_public_key
                    if include_taproot:
                        taproot_sk = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
                        foliage_sig_taproot: G2Element = AugSchemeMPL.sign(taproot_sk, foliage_block_data_hash, agg_pk)
                        foliage_transaction_block_sig_taproot: G2Element = AugSchemeMPL.sign(
                            taproot_sk, foliage_transaction_block_hash, agg_pk
                        )
                    else:
                        foliage_sig_taproot = G2Element()
                        foliage_transaction_block_sig_taproot = G2Element()

                    foliage_sig_farmer = AugSchemeMPL.sign(sk, foliage_block_data_hash, agg_pk)
                    foliage_transaction_block_sig_farmer = AugSchemeMPL.sign(sk, foliage_transaction_block_hash, agg_pk)

                    foliage_agg_sig = AugSchemeMPL.aggregate(
                        [foliage_sig_harvester, foliage_sig_farmer, foliage_sig_taproot]
                    )
                    foliage_block_agg_sig = AugSchemeMPL.aggregate(
                        [
                            foliage_transaction_block_sig_harvester,
                            foliage_transaction_block_sig_farmer,
                            foliage_transaction_block_sig_taproot,
                        ]
                    )
                    assert AugSchemeMPL.verify(agg_pk, foliage_block_data_hash, foliage_agg_sig)
                    assert AugSchemeMPL.verify(agg_pk, foliage_transaction_block_hash, foliage_block_agg_sig)

                    request_to_nodes = farmer_protocol.SignedValues(
                        computed_quality_string,
                        foliage_agg_sig,
                        foliage_block_agg_sig,
                    )

                    msg = make_msg(ProtocolMessageTypes.signed_values, request_to_nodes)
                    await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)

    """
    FARMER PROTOCOL (FARMER <-> FULL NODE)
    """

    @api_request
    async def new_signage_point(self, new_signage_point: farmer_protocol.NewSignagePoint):
        """Forward a signage point from the full node to all connected harvesters."""
        try:
            pool_difficulties: List[PoolDifficulty] = []
            for p2_singleton_puzzle_hash, pool_dict in self.farmer.pool_state.items():
                if pool_dict["pool_config"].pool_url == "":
                    # Self pooling
                    continue
                if pool_dict["current_difficulty"] is None:
                    self.farmer.log.warning(
                        f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
                        f"check communication with the pool, skipping this signage point, pool: "
                        f"{pool_dict['pool_config'].pool_url} "
                    )
                    continue
                pool_difficulties.append(
                    PoolDifficulty(
                        pool_dict["current_difficulty"],
                        self.farmer.constants.POOL_SUB_SLOT_ITERS,
                        p2_singleton_puzzle_hash,
                    )
                )
            message = harvester_protocol.NewSignagePointHarvester(
                new_signage_point.challenge_hash,
                new_signage_point.difficulty,
                new_signage_point.sub_slot_iters,
                new_signage_point.signage_point_index,
                new_signage_point.challenge_chain_sp,
                pool_difficulties,
            )

            msg = make_msg(ProtocolMessageTypes.new_signage_point_harvester, message)
            await self.farmer.server.send_to_all([msg], NodeType.HARVESTER)
            if new_signage_point.challenge_chain_sp not in self.farmer.sps:
                self.farmer.sps[new_signage_point.challenge_chain_sp] = []
        finally:
            # Age out old 24h information for every signage point regardless
            # of any failures. Note that this still lets old data remain if
            # the client isn't receiving signage points.
            cutoff_24h = time.time() - (24 * 60 * 60)
            for p2_singleton_puzzle_hash, pool_dict in self.farmer.pool_state.items():
                for key in ["points_found_24h", "points_acknowledged_24h"]:
                    if key not in pool_dict:
                        continue
                    pool_dict[key] = strip_old_entries(pairs=pool_dict[key], before=cutoff_24h)

        if new_signage_point in self.farmer.sps[new_signage_point.challenge_chain_sp]:
            self.farmer.log.debug(f"Duplicate signage point {new_signage_point.signage_point_index}")
            return

        self.farmer.sps[new_signage_point.challenge_chain_sp].append(new_signage_point)
        self.farmer.cache_add_time[new_signage_point.challenge_chain_sp] = uint64(int(time.time()))
        self.farmer.state_changed("new_signage_point", {"sp_hash": new_signage_point.challenge_chain_sp})

    @api_request
    async def request_signed_values(self, full_node_request: farmer_protocol.RequestSignedValues):
        """Ask the harvester that produced the quality string to sign the foliage hashes."""
        if full_node_request.quality_string not in self.farmer.quality_str_to_identifiers:
            self.farmer.log.error(f"Do not have quality string {full_node_request.quality_string}")
            return None

        (plot_identifier, challenge_hash, sp_hash, node_id) = self.farmer.quality_str_to_identifiers[
            full_node_request.quality_string
        ]
        request = harvester_protocol.RequestSignatures(
            plot_identifier,
            challenge_hash,
            sp_hash,
            [full_node_request.foliage_block_data_hash, full_node_request.foliage_transaction_block_hash],
        )

        msg = make_msg(ProtocolMessageTypes.request_signatures, request)
        await self.farmer.server.send_to_specific([msg], node_id)

    @api_request
    async def farming_info(self, request: farmer_protocol.FarmingInfo):
        """Publish per-signage-point farming statistics to state-change subscribers."""
        self.farmer.state_changed(
            "new_farming_info",
            {
                "farming_info": {
                    "challenge_hash": request.challenge_hash,
                    "signage_point": request.sp_hash,
                    "passed_filter": request.passed,
                    "proofs": request.proofs,
                    "total_plots": request.total_plots,
                    "timestamp": request.timestamp,
                }
            },
        )

    @api_request
    @peer_required
    async def respond_plots(self, _: harvester_protocol.RespondPlots, peer: ws.WSSalviaConnection):
        # Plot listings are only expected as direct replies to RequestPlots.
        self.farmer.log.warning(f"Respond plots came too late from: {peer.get_peer_logging()}")
import logging
from typing import List, Optional, Tuple, Union
from salvia.consensus.constants import ConsensusConstants
from salvia.protocols import timelord_protocol
from salvia.timelord.iters_from_block import iters_from_block
from salvia.timelord.types import Chain, StateType
from salvia.types.blockchain_format.classgroup import ClassgroupElement
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.blockchain_format.slots import ChallengeBlockInfo
from salvia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from salvia.types.end_of_slot_bundle import EndOfSubSlotBundle
from salvia.util.ints import uint8, uint32, uint64, uint128
log = logging.getLogger(__name__)
class LastState:
"""
Represents the state that the timelord is in, and should execute VDFs on top of. A state can be one of three types:
1. A "peak" or a block
2. An end of sub-slot
3. None, if it's the first sub-slot and there are no blocks yet
Timelords execute VDFs until they reach the next block or sub-slot, at which point the state is changed again.
The state can also be changed arbitrarily to a sub-slot or peak, for example in the case the timelord receives
a new block in the future.
"""
    def __init__(self, constants: ConsensusConstants):
        """Initialize to the pre-genesis state: first sub-slot, no peak, no sub-slot end."""
        self.state_type: StateType = StateType.FIRST_SUB_SLOT
        self.peak: Optional[timelord_protocol.NewPeakTimelord] = None
        self.subslot_end: Optional[EndOfSubSlotBundle] = None
        # Iterations into the current sub-slot of the last infusion (recomputed in set_state).
        self.last_ip: uint64 = uint64(0)
        self.deficit: uint8 = constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
        self.sub_epoch_summary: Optional[SubEpochSummary] = None
        self.constants: ConsensusConstants = constants
        self.last_weight: uint128 = uint128(0)
        self.last_height: uint32 = uint32(0)
        self.total_iters: uint128 = uint128(0)
        self.last_challenge_sb_or_eos_total_iters = uint128(0)
        self.last_block_total_iters: Optional[uint128] = None
        self.last_peak_challenge: bytes32 = constants.GENESIS_CHALLENGE
        # Difficulty and sub-slot iterations start from the chain's genesis constants.
        self.difficulty: uint64 = constants.DIFFICULTY_STARTING
        self.sub_slot_iters: uint64 = constants.SUB_SLOT_ITERS_STARTING
        # (reward challenge, total_iters) pairs, seeded with the genesis challenge.
        self.reward_challenge_cache: List[Tuple[bytes32, uint128]] = [(constants.GENESIS_CHALLENGE, uint128(0))]
        self.new_epoch = False
        self.passed_ses_height_but_not_yet_included = False
        self.infused_ses = False
def set_state(self, state: Union[timelord_protocol.NewPeakTimelord, EndOfSubSlotBundle]):
if isinstance(state, timelord_protocol.NewPeakTimelord):
self.state_type = StateType.PEAK
self.peak = state
self.subslot_end = None
_, self.last_ip = iters_from_block(
self.constants,
state.reward_chain_block,
state.sub_slot_iters,
state.difficulty,
)
self.deficit = state.deficit
self.sub_epoch_summary = state.sub_epoch_summary
self.last_weight = state.reward_chain_block.weight
self.last_height = state.reward_chain_block.height
self.total_iters = state.reward_chain_block.total_iters
self.last_peak_challenge = state.reward_chain_block.get_hash()
self.difficulty = state.difficulty
self.sub_slot_iters = state.sub_slot_iters
if state.reward_chain_block.is_transaction_block:
self.last_block_total_iters = self.total_iters
self.reward_challenge_cache = state.previous_reward_challenges
self.last_challenge_sb_or_eos_total_iters = self.peak.last_challenge_sb_or_eos_total_iters
self.new_epoch = False
if (self.peak.reward_chain_block.height + 1) % self.constants.SUB_EPOCH_BLOCKS == 0:
self.passed_ses_height_but_not_yet_included = True
else:
self.passed_ses_height_but_not_yet_included = state.passes_ses_height_but_not_yet_included
elif isinstance(state, EndOfSubSlotBundle):
self.state_type = StateType.END_OF_SUB_SLOT
if self.peak is not None:
self.total_iters = uint128(self.total_iters - self.get_last_ip() + self.sub_slot_iters)
else:
self.total_iters = uint128(self.total_iters + self.sub_slot_iters)
self.peak = None
self.subslot_end = state
self.last_ip = uint64(0)
self.deficit = state.reward_chain.deficit
if state.challenge_chain.new_difficulty is not None:
assert state.challenge_chain.new_sub_slot_iters is not None
self.difficulty = state.challenge_chain.new_difficulty
self.sub_slot_iters = state.challenge_chain.new_sub_slot_iters
self.new_epoch = True
else:
self.new_epoch = False
if state.challenge_chain.subepoch_summary_hash is not None:
self.infused_ses = True
self.passed_ses_height_but_not_yet_included = False
else:
self.infused_ses = False
self.passed_ses_height_but_not_yet_included = self.passed_ses_height_but_not_yet_included
self.last_challenge_sb_or_eos_total_iters = self.total_iters
else:
self.passed_ses_height_but_not_yet_included = self.passed_ses_height_but_not_yet_included
self.new_epoch = False
self.reward_challenge_cache.append((self.get_challenge(Chain.REWARD_CHAIN), self.total_iters))
log.info(f"Updated timelord peak to {self.get_challenge(Chain.REWARD_CHAIN)}, total iters: {self.total_iters}")
while len(self.reward_challenge_cache) > 2 * self.constants.MAX_SUB_SLOT_BLOCKS:
self.reward_challenge_cache.pop(0)
def get_sub_slot_iters(self) -> uint64:
return self.sub_slot_iters
def can_infuse_block(self, overflow: bool) -> bool:
if overflow and self.new_epoch:
# No overflows in new epoch
return False
if self.state_type == StateType.FIRST_SUB_SLOT or self.state_type == StateType.END_OF_SUB_SLOT:
return True
ss_start_iters = self.get_total_iters() - self.get_last_ip()
already_infused_count: int = 0
for _, total_iters in self.reward_challenge_cache:
if total_iters > ss_start_iters:
already_infused_count += 1
if already_infused_count >= self.constants.MAX_SUB_SLOT_BLOCKS:
return False
return True
def get_weight(self) -> uint128:
return self.last_weight
def get_height(self) -> uint32:
return self.last_height
def get_total_iters(self) -> uint128:
return self.total_iters
def get_last_peak_challenge(self) -> Optional[bytes32]:
return self.last_peak_challenge
def get_difficulty(self) -> uint64:
return self.difficulty
def get_last_ip(self) -> uint64:
return self.last_ip
def get_deficit(self) -> uint8:
return self.deficit
def just_infused_sub_epoch_summary(self) -> bool:
"""
Returns true if state is an end of sub-slot, and that end of sub-slot infused a sub epoch summary
"""
return self.state_type == StateType.END_OF_SUB_SLOT and self.infused_ses
def get_next_sub_epoch_summary(self) -> Optional[SubEpochSummary]:
if self.state_type == StateType.FIRST_SUB_SLOT or self.state_type == StateType.END_OF_SUB_SLOT:
# Can only infuse SES after a peak (in an end of sub slot)
return None
assert self.peak is not None
if self.passed_ses_height_but_not_yet_included and self.get_deficit() == 0:
# This will mean we will include the ses in the next sub-slot
return self.sub_epoch_summary
return None
def get_last_block_total_iters(self) -> Optional[uint128]:
return self.last_block_total_iters
def get_passed_ses_height_but_not_yet_included(self) -> bool:
return self.passed_ses_height_but_not_yet_included
def get_challenge(self, chain: Chain) -> Optional[bytes32]:
if self.state_type == StateType.FIRST_SUB_SLOT:
assert self.peak is None and self.subslot_end is None
if chain == Chain.CHALLENGE_CHAIN:
return self.constants.GENESIS_CHALLENGE
elif chain == Chain.REWARD_CHAIN:
return self.constants.GENESIS_CHALLENGE
elif chain == Chain.INFUSED_CHALLENGE_CHAIN:
return None
elif self.state_type == StateType.PEAK:
assert self.peak is not None
reward_chain_block = self.peak.reward_chain_block
if chain == Chain.CHALLENGE_CHAIN:
return reward_chain_block.challenge_chain_ip_vdf.challenge
elif chain == Chain.REWARD_CHAIN:
return reward_chain_block.get_hash()
elif chain == Chain.INFUSED_CHALLENGE_CHAIN:
if reward_chain_block.infused_challenge_chain_ip_vdf is not None:
return reward_chain_block.infused_challenge_chain_ip_vdf.challenge
elif self.peak.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
return ChallengeBlockInfo(
reward_chain_block.proof_of_space,
reward_chain_block.challenge_chain_sp_vdf,
reward_chain_block.challenge_chain_sp_signature,
reward_chain_block.challenge_chain_ip_vdf,
).get_hash()
return None
elif self.state_type == StateType.END_OF_SUB_SLOT:
assert self.subslot_end is not None
if chain == Chain.CHALLENGE_CHAIN:
return self.subslot_end.challenge_chain.get_hash()
elif chain == Chain.REWARD_CHAIN:
return self.subslot_end.reward_chain.get_hash()
elif chain == Chain.INFUSED_CHALLENGE_CHAIN:
if self.subslot_end.reward_chain.deficit < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
assert self.subslot_end.infused_challenge_chain is not None
return self.subslot_end.infused_challenge_chain.get_hash()
return None
return None
def get_initial_form(self, chain: Chain) -> Optional[ClassgroupElement]:
if self.state_type == StateType.FIRST_SUB_SLOT:
return ClassgroupElement.get_default_element()
elif self.state_type == StateType.PEAK:
assert self.peak is not None
reward_chain_block = self.peak.reward_chain_block
if chain == Chain.CHALLENGE_CHAIN:
return reward_chain_block.challenge_chain_ip_vdf.output
if chain == Chain.REWARD_CHAIN:
return ClassgroupElement.get_default_element()
if chain == Chain.INFUSED_CHALLENGE_CHAIN:
if reward_chain_block.infused_challenge_chain_ip_vdf is not None:
return reward_chain_block.infused_challenge_chain_ip_vdf.output
elif self.peak.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
return ClassgroupElement.get_default_element()
else:
return None
elif self.state_type == StateType.END_OF_SUB_SLOT:
if chain == Chain.CHALLENGE_CHAIN or chain == Chain.REWARD_CHAIN:
return ClassgroupElement.get_default_element()
if chain == Chain.INFUSED_CHALLENGE_CHAIN:
assert self.subslot_end is not None
if self.subslot_end.reward_chain.deficit < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
return ClassgroupElement.get_default_element()
else:
return None
return None | /salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/timelord/timelord_state.py | 0.809991 | 0.295548 | timelord_state.py | pypi |
import collections
import logging
from typing import Dict, List, Optional, Set, Tuple, Union, Callable
from chiabip158 import PyBIP158
from clvm.casts import int_from_bytes
from salvia.consensus.block_record import BlockRecord
from salvia.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from salvia.consensus.block_root_validation import validate_block_merkle_roots
from salvia.full_node.mempool_check_conditions import mempool_check_conditions_dict
from salvia.consensus.blockchain_interface import BlockchainInterface
from salvia.consensus.coinbase import create_farmer_coin, create_pool_coin
from salvia.consensus.constants import ConsensusConstants
from salvia.consensus.cost_calculator import NPCResult, calculate_cost_of_program
from salvia.consensus.find_fork_point import find_fork_point_in_chain
from salvia.full_node.block_store import BlockStore
from salvia.full_node.coin_store import CoinStore
from salvia.full_node.mempool_check_conditions import get_name_puzzle_conditions
from salvia.types.blockchain_format.coin import Coin
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.coin_record import CoinRecord
from salvia.types.condition_opcodes import ConditionOpcode
from salvia.types.condition_with_args import ConditionWithArgs
from salvia.types.full_block import FullBlock
from salvia.types.generator_types import BlockGenerator
from salvia.types.name_puzzle_condition import NPC
from salvia.types.unfinished_block import UnfinishedBlock
from salvia.util import cached_bls
from salvia.util.condition_tools import pkm_pairs
from salvia.util.errors import Err
from salvia.util.generator_tools import (
additions_for_npc,
tx_removals_and_additions,
)
from salvia.util.hash import std_hash
from salvia.util.ints import uint32, uint64, uint128
log = logging.getLogger(__name__)
async def validate_block_body(
    constants: ConsensusConstants,
    blocks: BlockchainInterface,
    block_store: BlockStore,
    coin_store: CoinStore,
    peak: Optional[BlockRecord],
    block: Union[FullBlock, UnfinishedBlock],
    height: uint32,
    npc_result: Optional[NPCResult],
    fork_point_with_peak: Optional[uint32],
    get_block_generator: Callable,
    validate_signature: bool = True,
) -> Tuple[Optional[Err], Optional[NPCResult]]:
    """
    This assumes the header block has been completely validated.
    Validates the transactions and body of the block. Returns None for the first value if everything
    validates correctly, or an Err if something does not validate. For the second value, returns a CostResult
    only if validation succeeded, and there are transactions. In other cases it returns None. The NPC result is
    the result of running the generator with the previous generators refs. It is only present for transaction
    blocks which have spent coins.

    The numbered comments below correspond to the individual consensus checks; each
    failing check returns early with the matching Err and no NPC result.

    fork_point_with_peak, if provided, is the height of the last block shared between
    the peak chain and this block's chain; if None it is recomputed here.
    get_block_generator is an async callable used to rebuild generators for blocks in
    the fork (not in the coin store).
    """
    if isinstance(block, FullBlock):
        assert height == block.height
    prev_transaction_block_height: uint32 = uint32(0)
    # 1. For non transaction-blocs: foliage block, transaction filter, transactions info, and generator must
    # be empty. If it is a block but not a transaction block, there is no body to validate. Check that all fields are
    # None
    if block.foliage.foliage_transaction_block_hash is None:
        if (
            block.foliage_transaction_block is not None
            or block.transactions_info is not None
            or block.transactions_generator is not None
        ):
            return Err.NOT_BLOCK_BUT_HAS_DATA, None
        # Walk back to the previous transaction block only to assert it has a timestamp.
        prev_tb: BlockRecord = blocks.block_record(block.prev_header_hash)
        while not prev_tb.is_transaction_block:
            prev_tb = blocks.block_record(prev_tb.prev_hash)
        assert prev_tb.timestamp is not None
        if len(block.transactions_generator_ref_list) > 0:
            return Err.NOT_BLOCK_BUT_HAS_DATA, None
        return None, None  # This means the block is valid
    # All checks below this point correspond to transaction blocks
    # 2. For blocks, foliage block, transactions info must not be empty
    if block.foliage_transaction_block is None or block.transactions_info is None:
        return Err.IS_TRANSACTION_BLOCK_BUT_NO_DATA, None
    assert block.foliage_transaction_block is not None
    # keeps track of the reward coins that need to be incorporated
    expected_reward_coins: Set[Coin] = set()
    # 3. The transaction info hash in the Foliage block must match the transaction info
    if block.foliage_transaction_block.transactions_info_hash != std_hash(block.transactions_info):
        return Err.INVALID_TRANSACTIONS_INFO_HASH, None
    # 4. The foliage block hash in the foliage block must match the foliage block
    if block.foliage.foliage_transaction_block_hash != std_hash(block.foliage_transaction_block):
        return Err.INVALID_FOLIAGE_BLOCK_HASH, None
    # 5. The reward claims must be valid for the previous blocks, and current block fees
    # If height == 0, expected_reward_coins will be left empty
    if height > 0:
        # Add reward claims for all blocks from the prev prev block, until the prev block (including the latter)
        prev_transaction_block = blocks.block_record(block.foliage_transaction_block.prev_transaction_block_hash)
        prev_transaction_block_height = prev_transaction_block.height
        assert prev_transaction_block.fees is not None
        pool_coin = create_pool_coin(
            prev_transaction_block_height,
            prev_transaction_block.pool_puzzle_hash,
            calculate_pool_reward(prev_transaction_block.height),
            constants.GENESIS_CHALLENGE,
        )
        # The farmer also collects the fees of the previous transaction block.
        farmer_coin = create_farmer_coin(
            prev_transaction_block_height,
            prev_transaction_block.farmer_puzzle_hash,
            uint64(calculate_base_farmer_reward(prev_transaction_block.height) + prev_transaction_block.fees),
            constants.GENESIS_CHALLENGE,
        )
        # Adds the previous block
        expected_reward_coins.add(pool_coin)
        expected_reward_coins.add(farmer_coin)
        # For the second block in the chain, don't go back further
        if prev_transaction_block.height > 0:
            # Walk back over the non-transaction blocks between the two transaction blocks;
            # each of them also gets pool and farmer reward coins (no fees for these).
            curr_b = blocks.block_record(prev_transaction_block.prev_hash)
            while not curr_b.is_transaction_block:
                expected_reward_coins.add(
                    create_pool_coin(
                        curr_b.height,
                        curr_b.pool_puzzle_hash,
                        calculate_pool_reward(curr_b.height),
                        constants.GENESIS_CHALLENGE,
                    )
                )
                expected_reward_coins.add(
                    create_farmer_coin(
                        curr_b.height,
                        curr_b.farmer_puzzle_hash,
                        calculate_base_farmer_reward(curr_b.height),
                        constants.GENESIS_CHALLENGE,
                    )
                )
                curr_b = blocks.block_record(curr_b.prev_hash)
    if set(block.transactions_info.reward_claims_incorporated) != expected_reward_coins:
        return Err.INVALID_REWARD_COINS, None
    # Length check catches duplicate reward claims that the set comparison would miss.
    if len(block.transactions_info.reward_claims_incorporated) != len(expected_reward_coins):
        return Err.INVALID_REWARD_COINS, None
    removals: List[bytes32] = []
    coinbase_additions: List[Coin] = list(expected_reward_coins)
    additions: List[Coin] = []
    npc_list: List[NPC] = []
    removals_puzzle_dic: Dict[bytes32, bytes32] = {}
    cost: uint64 = uint64(0)
    # In header validation we check that timestamp is not more that 10 minutes into the future
    # 6. No transactions before INITIAL_TRANSACTION_FREEZE timestamp
    # (this test has been removed)
    # 7a. The generator root must be the hash of the serialized bytes of
    # the generator for this block (or zeroes if no generator)
    if block.transactions_generator is not None:
        if std_hash(bytes(block.transactions_generator)) != block.transactions_info.generator_root:
            return Err.INVALID_TRANSACTIONS_GENERATOR_HASH, None
    else:
        if block.transactions_info.generator_root != bytes([0] * 32):
            return Err.INVALID_TRANSACTIONS_GENERATOR_HASH, None
    # 8a. The generator_ref_list must be the hash of the serialized bytes of
    # the generator ref list for this block (or 'one' bytes [0x01] if no generator)
    # 8b. The generator ref list length must be less than or equal to MAX_GENERATOR_REF_LIST_SIZE entries
    # 8c. The generator ref list must not point to a height >= this block's height
    if block.transactions_generator_ref_list in (None, []):
        if block.transactions_info.generator_refs_root != bytes([1] * 32):
            return Err.INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT, None
    else:
        # If we have a generator reference list, we must have a generator
        if block.transactions_generator is None:
            return Err.INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT, None
        # The generator_refs_root must be the hash of the concatenation of the List[uint32]
        generator_refs_hash = std_hash(b"".join([bytes(i) for i in block.transactions_generator_ref_list]))
        if block.transactions_info.generator_refs_root != generator_refs_hash:
            return Err.INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT, None
        if len(block.transactions_generator_ref_list) > constants.MAX_GENERATOR_REF_LIST_SIZE:
            return Err.TOO_MANY_GENERATOR_REFS, None
        if any([index >= height for index in block.transactions_generator_ref_list]):
            return Err.FUTURE_GENERATOR_REFS, None
    if block.transactions_generator is not None:
        # Get List of names removed, puzzles hashes for removed coins and conditions created
        assert npc_result is not None
        cost = calculate_cost_of_program(block.transactions_generator, npc_result, constants.COST_PER_BYTE)
        npc_list = npc_result.npc_list
        # 7. Check that cost <= MAX_BLOCK_COST_CLVM
        log.debug(
            f"Cost: {cost} max: {constants.MAX_BLOCK_COST_CLVM} "
            f"percent full: {round(100 * (cost / constants.MAX_BLOCK_COST_CLVM), 2)}%"
        )
        if cost > constants.MAX_BLOCK_COST_CLVM:
            return Err.BLOCK_COST_EXCEEDS_MAX, None
        # 8. The CLVM program must not return any errors
        if npc_result.error is not None:
            return Err(npc_result.error), None
        for npc in npc_list:
            removals.append(npc.coin_name)
            removals_puzzle_dic[npc.coin_name] = npc.puzzle_hash
        additions = additions_for_npc(npc_list)
    else:
        assert npc_result is None
    # 9. Check that the correct cost is in the transactions info
    if block.transactions_info.cost != cost:
        return Err.INVALID_BLOCK_COST, None
    additions_dic: Dict[bytes32, Coin] = {}
    # 10. Check additions for max coin amount
    # Be careful to check for 64 bit overflows in other languages. This is the max 64 bit unsigned integer
    # We will not even reach here because Coins do type checking (uint64)
    for coin in additions + coinbase_additions:
        additions_dic[coin.name()] = coin
        if coin.amount < 0:
            return Err.COIN_AMOUNT_NEGATIVE, None
        if coin.amount > constants.MAX_COIN_AMOUNT:
            return Err.COIN_AMOUNT_EXCEEDS_MAXIMUM, None
    # 11. Validate addition and removal roots
    root_error = validate_block_merkle_roots(
        block.foliage_transaction_block.additions_root,
        block.foliage_transaction_block.removals_root,
        additions + coinbase_additions,
        removals,
    )
    if root_error:
        return root_error, None
    # 12. The additions and removals must result in the correct filter
    byte_array_tx: List[bytes32] = []
    for coin in additions + coinbase_additions:
        byte_array_tx.append(bytearray(coin.puzzle_hash))
    for coin_name in removals:
        byte_array_tx.append(bytearray(coin_name))
    bip158: PyBIP158 = PyBIP158(byte_array_tx)
    encoded_filter = bytes(bip158.GetEncoded())
    filter_hash = std_hash(encoded_filter)
    if filter_hash != block.foliage_transaction_block.filter_hash:
        return Err.INVALID_TRANSACTIONS_FILTER_HASH, None
    # 13. Check for duplicate outputs in additions
    addition_counter = collections.Counter(_.name() for _ in additions + coinbase_additions)
    for k, v in addition_counter.items():
        if v > 1:
            return Err.DUPLICATE_OUTPUT, None
    # 14. Check for duplicate spends inside block
    removal_counter = collections.Counter(removals)
    for k, v in removal_counter.items():
        if v > 1:
            return Err.DOUBLE_SPEND, None
    # 15. Check if removals exist and were not previously spent. (unspent_db + diff_store + this_block)
    # The fork point is the last block in common between the peak chain and the chain of `block`
    if peak is None or height == 0:
        fork_h: int = -1
    elif fork_point_with_peak is not None:
        fork_h = fork_point_with_peak
    else:
        fork_h = find_fork_point_in_chain(blocks, peak, blocks.block_record(block.prev_header_hash))
    # Get additions and removals since (after) fork_h but not including this block
    # The values include: the coin that was added, the height of the block in which it was confirmed, and the
    # timestamp of the block in which it was confirmed
    additions_since_fork: Dict[bytes32, Tuple[Coin, uint32, uint64]] = {}  # This includes coinbase additions
    removals_since_fork: Set[bytes32] = set()
    # For height 0, there are no additions and removals before this block, so we can skip
    if height > 0:
        # First, get all the blocks in the fork > fork_h, < block.height
        prev_block: Optional[FullBlock] = await block_store.get_full_block(block.prev_header_hash)
        reorg_blocks: Dict[uint32, FullBlock] = {}
        curr: Optional[FullBlock] = prev_block
        assert curr is not None
        while curr.height > fork_h:
            if curr.height == 0:
                break
            curr = await block_store.get_full_block(curr.prev_header_hash)
            assert curr is not None
            reorg_blocks[curr.height] = curr
        if fork_h != -1:
            assert len(reorg_blocks) == height - fork_h - 1
        curr = prev_block
        assert curr is not None
        while curr.height > fork_h:
            # Coin store doesn't contain coins from fork, we have to run generator for each block in fork
            if curr.transactions_generator is not None:
                # These blocks are in the past and therefore assumed to be valid, so get_block_generator won't raise
                curr_block_generator: Optional[BlockGenerator] = await get_block_generator(curr)
                assert curr_block_generator is not None and curr.transactions_info is not None
                curr_npc_result = get_name_puzzle_conditions(
                    curr_block_generator,
                    min(constants.MAX_BLOCK_COST_CLVM, curr.transactions_info.cost),
                    cost_per_byte=constants.COST_PER_BYTE,
                    safe_mode=False,
                )
                removals_in_curr, additions_in_curr = tx_removals_and_additions(curr_npc_result.npc_list)
            else:
                removals_in_curr = []
                additions_in_curr = []
            for c_name in removals_in_curr:
                assert c_name not in removals_since_fork
                removals_since_fork.add(c_name)
            for c in additions_in_curr:
                assert c.name() not in additions_since_fork
                assert curr.foliage_transaction_block is not None
                additions_since_fork[c.name()] = (c, curr.height, curr.foliage_transaction_block.timestamp)
            for coinbase_coin in curr.get_included_reward_coins():
                assert coinbase_coin.name() not in additions_since_fork
                assert curr.foliage_transaction_block is not None
                additions_since_fork[coinbase_coin.name()] = (
                    coinbase_coin,
                    curr.height,
                    curr.foliage_transaction_block.timestamp,
                )
            if curr.height == 0:
                break
            curr = reorg_blocks[curr.height - 1]
            assert curr is not None
    # Resolve each removal to a CoinRecord, whether it was created in this block
    # (ephemeral), in the current chain before the fork, or in the fork itself.
    removal_coin_records: Dict[bytes32, CoinRecord] = {}
    for rem in removals:
        if rem in additions_dic:
            # Ephemeral coin
            rem_coin: Coin = additions_dic[rem]
            new_unspent: CoinRecord = CoinRecord(
                rem_coin,
                height,
                height,
                True,
                False,
                block.foliage_transaction_block.timestamp,
            )
            removal_coin_records[new_unspent.name] = new_unspent
        else:
            unspent = await coin_store.get_coin_record(rem)
            if unspent is not None and unspent.confirmed_block_index <= fork_h:
                # Spending something in the current chain, confirmed before fork
                # (We ignore all coins confirmed after fork)
                if unspent.spent == 1 and unspent.spent_block_index <= fork_h:
                    # Check for coins spent in an ancestor block
                    return Err.DOUBLE_SPEND, None
                removal_coin_records[unspent.name] = unspent
            else:
                # This coin is not in the current heaviest chain, so it must be in the fork
                if rem not in additions_since_fork:
                    # Check for spending a coin that does not exist in this fork
                    log.error(f"Err.UNKNOWN_UNSPENT: COIN ID: {rem} NPC RESULT: {npc_result}")
                    return Err.UNKNOWN_UNSPENT, None
                new_coin, confirmed_height, confirmed_timestamp = additions_since_fork[rem]
                new_coin_record: CoinRecord = CoinRecord(
                    new_coin,
                    confirmed_height,
                    uint32(0),
                    False,
                    False,
                    confirmed_timestamp,
                )
                removal_coin_records[new_coin_record.name] = new_coin_record
        # This check applies to both coins created before fork (pulled from coin_store),
        # and coins created after fork (additions_since_fork)
        if rem in removals_since_fork:
            # This coin was spent in the fork
            return Err.DOUBLE_SPEND_IN_FORK, None
    removed = 0
    for unspent in removal_coin_records.values():
        removed += unspent.coin.amount
    added = 0
    for coin in additions:
        added += coin.amount
    # 16. Check that the total coin amount for added is <= removed
    if removed < added:
        return Err.MINTING_COIN, None
    fees = removed - added
    assert fees >= 0
    assert_fee_sum: uint128 = uint128(0)
    for npc in npc_list:
        if ConditionOpcode.RESERVE_FEE in npc.condition_dict:
            fee_list: List[ConditionWithArgs] = npc.condition_dict[ConditionOpcode.RESERVE_FEE]
            for cvp in fee_list:
                fee = int_from_bytes(cvp.vars[0])
                if fee < 0:
                    return Err.RESERVE_FEE_CONDITION_FAILED, None
                assert_fee_sum = uint128(assert_fee_sum + fee)
    # 17. Check that the assert fee sum <= fees, and that each reserved fee is non-negative
    if fees < assert_fee_sum:
        return Err.RESERVE_FEE_CONDITION_FAILED, None
    # 18. Check that the fee amount + farmer reward < maximum coin amount
    if fees + calculate_base_farmer_reward(height) > constants.MAX_COIN_AMOUNT:
        return Err.COIN_AMOUNT_EXCEEDS_MAXIMUM, None
    # 19. Check that the computed fees are equal to the fees in the block header
    if block.transactions_info.fees != fees:
        return Err.INVALID_BLOCK_FEE_AMOUNT, None
    # 20. Verify that removed coin puzzle_hashes match with calculated puzzle_hashes
    for unspent in removal_coin_records.values():
        if unspent.coin.puzzle_hash != removals_puzzle_dic[unspent.name]:
            return Err.WRONG_PUZZLE_HASH, None
    # 21. Verify conditions
    for npc in npc_list:
        assert height is not None
        unspent = removal_coin_records[npc.coin_name]
        error = mempool_check_conditions_dict(
            unspent,
            npc.condition_dict,
            prev_transaction_block_height,
            block.foliage_transaction_block.timestamp,
        )
        if error:
            return error, None
    # create hash_key list for aggsig check
    pairs_pks, pairs_msgs = pkm_pairs(npc_list, constants.AGG_SIG_ME_ADDITIONAL_DATA)
    # 22. Verify aggregated signature
    # TODO: move this to pre_validate_blocks_multiprocessing so we can sync faster
    if not block.transactions_info.aggregated_signature:
        return Err.BAD_AGGREGATE_SIGNATURE, None
    # The pairing cache is not useful while syncing as each pairing is seen
    # only once, so the extra effort of populating it is not justified.
    # However, we force caching of pairings just for unfinished blocks
    # as the cache is likely to be useful when validating the corresponding
    # finished blocks later.
    if validate_signature:
        force_cache: bool = isinstance(block, UnfinishedBlock)
        if not cached_bls.aggregate_verify(
            pairs_pks, pairs_msgs, block.transactions_info.aggregated_signature, force_cache
        ):
            return Err.BAD_AGGREGATE_SIGNATURE, None
    return None, npc_result
import dataclasses
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.util.byte_types import hexstr_to_bytes
from salvia.util.ints import uint8, uint32, uint64, uint128
import logging
log = logging.getLogger(__name__)
@dataclasses.dataclass(frozen=True)
class ConsensusConstants:
    """Chain-wide consensus parameters.

    Instances are frozen (immutable); derive modified copies via :meth:`replace`
    or :meth:`replace_str_to_bytes` (e.g. for testnet overrides from config.yaml).
    """

    SLOT_BLOCKS_TARGET: uint32  # How many blocks to target per sub-slot
    MIN_BLOCKS_PER_CHALLENGE_BLOCK: uint8  # How many blocks must be created per slot (to make challenge sb)
    # Max number of blocks that can be infused into a sub-slot.
    # Note: this must be less than SUB_EPOCH_BLOCKS/2, and > SLOT_BLOCKS_TARGET
    MAX_SUB_SLOT_BLOCKS: uint32
    NUM_SPS_SUB_SLOT: uint32  # The number of signage points per sub-slot (including the 0th sp at the sub-slot start)
    SUB_SLOT_ITERS_STARTING: uint64  # The sub_slot_iters for the first epoch
    DIFFICULTY_CONSTANT_FACTOR: uint128  # Multiplied by the difficulty to get iterations
    DIFFICULTY_STARTING: uint64  # The difficulty for the first epoch
    # The maximum factor by which difficulty and sub_slot_iters can change per epoch
    DIFFICULTY_CHANGE_MAX_FACTOR: uint32
    SUB_EPOCH_BLOCKS: uint32  # The number of blocks per sub-epoch
    EPOCH_BLOCKS: uint32  # The number of blocks per sub-epoch, must be a multiple of SUB_EPOCH_BLOCKS
    SIGNIFICANT_BITS: int  # The number of bits to look at in difficulty and min iters. The rest are zeroed
    DISCRIMINANT_SIZE_BITS: int  # Max is 1024 (based on ClassGroupElement int size)
    NUMBER_ZERO_BITS_PLOT_FILTER: int  # H(plot id + challenge hash + signage point) must start with these many zeroes
    MIN_PLOT_SIZE: int
    MAX_PLOT_SIZE: int
    SUB_SLOT_TIME_TARGET: int  # The target number of seconds per sub-slot
    NUM_SP_INTERVALS_EXTRA: int  # The difference between signage point and infusion point (plus required_iters)
    MAX_FUTURE_TIME: int  # The next block can have a timestamp of at most these many seconds more
    NUMBER_OF_TIMESTAMPS: int  # Than the average of the last NUMBER_OF_TIMESTAMPS blocks
    # Used as the initial cc rc challenges, as well as first block back pointers, and first SES back pointer
    # We override this value based on the chain being run (testnet0, testnet1, mainnet, etc)
    GENESIS_CHALLENGE: bytes32
    # Forks of salvia should change this value to provide replay attack protection
    AGG_SIG_ME_ADDITIONAL_DATA: bytes
    GENESIS_PRE_FARM_POOL_PUZZLE_HASH: bytes32  # The block at height must pay out to this pool puzzle hash
    GENESIS_PRE_FARM_FARMER_PUZZLE_HASH: bytes32  # The block at height must pay out to this farmer puzzle hash
    MAX_VDF_WITNESS_SIZE: int  # The maximum number of classgroup elements within an n-wesolowski proof
    # Size of mempool = 10x the size of block
    MEMPOOL_BLOCK_BUFFER: int
    # Max coin amount uint(1 << 64). This allows coin amounts to fit in 64 bits. This is around 18M salvia.
    MAX_COIN_AMOUNT: int
    # Max block cost in clvm cost units
    MAX_BLOCK_COST_CLVM: int
    # Cost per byte of generator program
    COST_PER_BYTE: int
    WEIGHT_PROOF_THRESHOLD: uint8
    WEIGHT_PROOF_RECENT_BLOCKS: uint32
    MAX_BLOCK_COUNT_PER_REQUESTS: uint32
    BLOCKS_CACHE_SIZE: uint32
    NETWORK_TYPE: int
    MAX_GENERATOR_SIZE: uint32
    MAX_GENERATOR_REF_LIST_SIZE: uint32
    POOL_SUB_SLOT_ITERS: uint64

    def replace(self, **changes) -> "ConsensusConstants":
        """Return a copy of these constants with ``changes`` applied verbatim."""
        return dataclasses.replace(self, **changes)

    def replace_str_to_bytes(self, **changes) -> "ConsensusConstants":
        """
        Return a copy of these constants with ``changes`` applied, converting any
        str (hex) values to bytes first.

        Keys that are not existing constant names are skipped with a warning
        rather than raising, so extra entries in config.yaml don't prevent startup.
        """
        filtered_changes = {}
        for k, v in changes.items():
            if not hasattr(self, k):
                # logging.Logger.warn is deprecated (and removed in Python 3.13); use warning().
                log.warning(f'invalid key in network configuration (config.yaml) "{k}". Ignoring')
                continue
            # Hex strings from the config are decoded; everything else passes through unchanged.
            filtered_changes[k] = hexstr_to_bytes(v) if isinstance(v, str) else v
        return dataclasses.replace(self, **filtered_changes)
from salvia.consensus.constants import ConsensusConstants
from salvia.consensus.pos_quality import _expected_plot_size
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.util.hash import std_hash
from salvia.util.ints import uint8, uint64, uint128
def is_overflow_block(constants: ConsensusConstants, signage_point_index: uint8) -> bool:
    """Return True if a block with this signage point index is an overflow block.

    Overflow blocks have their signage point within the final
    NUM_SP_INTERVALS_EXTRA signage-point intervals of the sub-slot.

    Raises:
        ValueError: if the signage point index is out of range.
    """
    if signage_point_index >= constants.NUM_SPS_SUB_SLOT:
        raise ValueError("SP index too high")
    overflow_threshold = constants.NUM_SPS_SUB_SLOT - constants.NUM_SP_INTERVALS_EXTRA
    return signage_point_index >= overflow_threshold
def calculate_sp_interval_iters(constants: ConsensusConstants, sub_slot_iters: uint64) -> uint64:
    """Return the number of VDF iterations between consecutive signage points.

    ``sub_slot_iters`` must divide evenly by the number of signage points.
    """
    assert sub_slot_iters % constants.NUM_SPS_SUB_SLOT == 0
    interval = sub_slot_iters // constants.NUM_SPS_SUB_SLOT
    return uint64(interval)
def calculate_sp_iters(constants: ConsensusConstants, sub_slot_iters: uint64, signage_point_index: uint8) -> uint64:
    """Return the iterations from the sub-slot start to the given signage point.

    Raises:
        ValueError: if the signage point index is out of range.
    """
    if signage_point_index >= constants.NUM_SPS_SUB_SLOT:
        raise ValueError("SP index too high")
    interval_iters = calculate_sp_interval_iters(constants, sub_slot_iters)
    return uint64(interval_iters * signage_point_index)
def calculate_ip_iters(
    constants: ConsensusConstants,
    sub_slot_iters: uint64,
    signage_point_index: uint8,
    required_iters: uint64,
) -> uint64:
    """Return the iterations from the sub-slot start to a block's infusion point.

    The infusion point trails the signage point by NUM_SP_INTERVALS_EXTRA whole
    intervals plus ``required_iters``, wrapping around the sub-slot boundary.
    Note that the SSI is for the block passed in, which might be in the previous
    epoch.

    Raises:
        ValueError: if the signage point or required iterations are out of range.
    """
    sp_iters = calculate_sp_iters(constants, sub_slot_iters, signage_point_index)
    sp_interval_iters: uint64 = calculate_sp_interval_iters(constants, sub_slot_iters)
    if sp_iters % sp_interval_iters != 0 or sp_iters >= sub_slot_iters:
        raise ValueError(f"Invalid sp iters {sp_iters} for this ssi {sub_slot_iters}")
    if required_iters >= sp_interval_iters or required_iters == 0:
        raise ValueError(
            f"Required iters {required_iters} is not below the sp interval iters {sp_interval_iters} "
            f"{sub_slot_iters} or not >0."
        )
    extra_iters = constants.NUM_SP_INTERVALS_EXTRA * sp_interval_iters
    ip_iters = (sp_iters + extra_iters + required_iters) % sub_slot_iters
    return uint64(ip_iters)
def calculate_iterations_quality(
    difficulty_constant_factor: uint128,
    quality_string: bytes32,
    size: int,
    difficulty: uint64,
    cc_sp_output_hash: bytes32,
) -> uint64:
    """
    Calculates the number of iterations from the quality. This is derives as the difficulty times the constant factor
    times a random number between 0 and 1 (based on quality string), divided by plot size.
    """
    # Mix the quality string with the signage point hash to get the per-sp randomness.
    sp_quality_string: bytes32 = std_hash(quality_string + cc_sp_output_hash)
    quality_as_int = int.from_bytes(sp_quality_string, "big", signed=False)
    numerator = int(difficulty) * int(difficulty_constant_factor) * quality_as_int
    # Dividing by 2^256 maps the 256-bit hash onto [0, 1); larger plots get fewer iterations.
    denominator = (1 << 256) * int(_expected_plot_size(size))
    iters = uint64(numerator // denominator)
    # Never return zero iterations.
    return max(iters, uint64(1))
import dataclasses
import logging
import time
from typing import Optional, Tuple
from blspy import AugSchemeMPL
from salvia.consensus.block_record import BlockRecord
from salvia.consensus.blockchain_interface import BlockchainInterface
from salvia.consensus.constants import ConsensusConstants
from salvia.consensus.deficit import calculate_deficit
from salvia.consensus.difficulty_adjustment import can_finish_sub_and_full_epoch
from salvia.consensus.get_block_challenge import final_eos_is_already_included, get_block_challenge
from salvia.consensus.make_sub_epoch_summary import make_sub_epoch_summary
from salvia.consensus.pot_iterations import (
calculate_ip_iters,
calculate_iterations_quality,
calculate_sp_interval_iters,
calculate_sp_iters,
is_overflow_block,
)
from salvia.consensus.vdf_info_computation import get_signage_point_vdf_info
from salvia.types.blockchain_format.classgroup import ClassgroupElement
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.blockchain_format.slots import ChallengeChainSubSlot, RewardChainSubSlot, SubSlotProofs
from salvia.types.blockchain_format.vdf import VDFInfo, VDFProof
from salvia.types.end_of_slot_bundle import EndOfSubSlotBundle
from salvia.types.header_block import HeaderBlock
from salvia.types.unfinished_header_block import UnfinishedHeaderBlock
from salvia.util.errors import Err, ValidationError
from salvia.util.hash import std_hash
from salvia.util.ints import uint8, uint32, uint64, uint128
log = logging.getLogger(__name__)
# noinspection PyCallByClass
def validate_unfinished_header_block(
    constants: ConsensusConstants,
    blocks: BlockchainInterface,
    header_block: UnfinishedHeaderBlock,
    check_filter: bool,
    expected_difficulty: uint64,
    expected_sub_slot_iters: uint64,
    skip_overflow_last_ss_validation: bool = False,
    skip_vdf_is_valid: bool = False,
    check_sub_epoch_summary=True,
) -> Tuple[Optional[uint64], Optional[ValidationError]]:
    """
    Validates an unfinished header block. This is a block without the infusion VDFs (unfinished)
    and without transactions and transaction info (header). Returns (required_iters, error).

    This method is meant to validate only the unfinished part of the block. However, the finished_sub_slots
    refers to all sub-slots that were finishes from the previous block's infusion point, up to this blocks
    infusion point. Therefore, in the case where this is an overflow block, and the last sub-slot is not yet
    released, header_block.finished_sub_slots will be missing one sub-slot. In this case,
    skip_overflow_last_ss_validation must be set to True. This will skip validation of end of slots, sub-epochs,
    and lead to other small tweaks in validation.

    Args:
        constants: consensus constants of the network being validated against.
        blocks: lookup interface for previously-stored block records.
        header_block: the unfinished header block under validation.
        check_filter: when True, check 25 verifies the transactions filter hash.
        expected_difficulty: difficulty the caller expects this block to meet.
        expected_sub_slot_iters: sub-slot iterations the caller expects for this slot.
        skip_overflow_last_ss_validation: see above (overflow block, last sub-slot unreleased).
        skip_vdf_is_valid: when True, the expensive VDF proof verifications are skipped
            (structural/equality checks on the VDF infos still run).
        check_sub_epoch_summary: when True, check 3c recomputes the expected sub-epoch
            summary and compares its hash.

    Returns:
        (required_iters, None) if the unfinished header block is valid, else (None, error).
    """
    # 1. Check that the previous block exists in the blockchain, or that it is correct
    prev_b = blocks.try_block_record(header_block.prev_header_hash)
    genesis_block = prev_b is None
    if genesis_block and header_block.prev_header_hash != constants.GENESIS_CHALLENGE:
        return None, ValidationError(Err.INVALID_PREV_BLOCK_HASH)

    overflow = is_overflow_block(constants, header_block.reward_chain_block.signage_point_index)
    if skip_overflow_last_ss_validation and overflow:
        if final_eos_is_already_included(header_block, blocks, expected_sub_slot_iters):
            # The final end-of-slot is actually present, so full validation can proceed.
            skip_overflow_last_ss_validation = False
            finished_sub_slots_since_prev = len(header_block.finished_sub_slots)
        else:
            # Account for the one sub-slot that has not been released yet.
            finished_sub_slots_since_prev = len(header_block.finished_sub_slots) + 1
    else:
        finished_sub_slots_since_prev = len(header_block.finished_sub_slots)

    new_sub_slot: bool = finished_sub_slots_since_prev > 0

    can_finish_se: bool = False
    can_finish_epoch: bool = False
    if genesis_block:
        height: uint32 = uint32(0)
        assert expected_difficulty == constants.DIFFICULTY_STARTING
        assert expected_sub_slot_iters == constants.SUB_SLOT_ITERS_STARTING
    else:
        assert prev_b is not None
        height = uint32(prev_b.height + 1)
        if new_sub_slot:
            can_finish_se, can_finish_epoch = can_finish_sub_and_full_epoch(
                constants,
                blocks,
                prev_b.height,
                prev_b.prev_hash,
                prev_b.deficit,
                prev_b.sub_epoch_summary_included is not None,
            )
        else:
            can_finish_se = False
            can_finish_epoch = False

    # 2. Check finished slots that have been crossed since prev_b
    ses_hash: Optional[bytes32] = None
    if new_sub_slot and not skip_overflow_last_ss_validation:
        # Finished a slot(s) since previous block. The first sub-slot must have at least one block, and all
        # subsequent sub-slots must be empty
        for finished_sub_slot_n, sub_slot in enumerate(header_block.finished_sub_slots):
            # Start of slot challenge is fetched from SP
            challenge_hash: bytes32 = sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.challenge

            if finished_sub_slot_n == 0:
                if genesis_block:
                    # 2a. check sub-slot challenge hash for genesis block
                    if challenge_hash != constants.GENESIS_CHALLENGE:
                        return None, ValidationError(Err.INVALID_PREV_CHALLENGE_SLOT_HASH)
                else:
                    assert prev_b is not None
                    curr: BlockRecord = prev_b
                    # Walk back to the first block of the previous sub-slot to find its slot hashes.
                    while not curr.first_in_sub_slot:
                        curr = blocks.block_record(curr.prev_hash)
                    assert curr.finished_challenge_slot_hashes is not None

                    # 2b. check sub-slot challenge hash for non-genesis block
                    if not curr.finished_challenge_slot_hashes[-1] == challenge_hash:
                        # NOTE(review): debug `print` left in — presumably intentional upstream; logging would be more appropriate.
                        print(curr.finished_challenge_slot_hashes[-1], challenge_hash)
                        return None, ValidationError(Err.INVALID_PREV_CHALLENGE_SLOT_HASH)
            else:
                # 2c. check sub-slot challenge hash for empty slot
                if (
                    not header_block.finished_sub_slots[finished_sub_slot_n - 1].challenge_chain.get_hash()
                    == challenge_hash
                ):
                    return None, ValidationError(Err.INVALID_PREV_CHALLENGE_SLOT_HASH)

            if genesis_block:
                # 2d. Validate that genesis block has no ICC
                if sub_slot.infused_challenge_chain is not None:
                    return None, ValidationError(Err.SHOULD_NOT_HAVE_ICC)
            else:
                assert prev_b is not None
                icc_iters_committed: Optional[uint64] = None
                icc_iters_proof: Optional[uint64] = None
                icc_challenge_hash: Optional[bytes32] = None
                icc_vdf_input = None
                if prev_b.deficit < constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
                    # There should be no ICC chain if the last block's deficit is 16
                    # Prev sb's deficit is 0, 1, 2, 3, or 4
                    if finished_sub_slot_n == 0:
                        # This is the first sub slot after the last sb, which must have deficit 1-4, and thus an ICC
                        curr = prev_b
                        while not curr.is_challenge_block(constants) and not curr.first_in_sub_slot:
                            curr = blocks.block_record(curr.prev_hash)
                        if curr.is_challenge_block(constants):
                            icc_challenge_hash = curr.challenge_block_info_hash
                            icc_iters_committed = uint64(prev_b.sub_slot_iters - curr.ip_iters(constants))
                        else:
                            assert curr.finished_infused_challenge_slot_hashes is not None
                            icc_challenge_hash = curr.finished_infused_challenge_slot_hashes[-1]
                            icc_iters_committed = prev_b.sub_slot_iters
                        icc_iters_proof = uint64(prev_b.sub_slot_iters - prev_b.ip_iters(constants))
                        if prev_b.is_challenge_block(constants):
                            icc_vdf_input = ClassgroupElement.get_default_element()
                        else:
                            icc_vdf_input = prev_b.infused_challenge_vdf_output
                    else:
                        # This is not the first sub slot after the last block, so we might not have an ICC
                        if (
                            header_block.finished_sub_slots[finished_sub_slot_n - 1].reward_chain.deficit
                            < constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
                        ):
                            finished_ss = header_block.finished_sub_slots[finished_sub_slot_n - 1]
                            assert finished_ss.infused_challenge_chain is not None

                            # Only sets the icc iff the previous sub slots deficit is 4 or less
                            icc_challenge_hash = finished_ss.infused_challenge_chain.get_hash()
                            icc_iters_committed = prev_b.sub_slot_iters
                            icc_iters_proof = icc_iters_committed
                            icc_vdf_input = ClassgroupElement.get_default_element()

                # 2e. Validate that there is not icc iff icc_challenge hash is None
                assert (sub_slot.infused_challenge_chain is None) == (icc_challenge_hash is None)
                if sub_slot.infused_challenge_chain is not None:
                    assert icc_vdf_input is not None
                    assert icc_iters_proof is not None
                    assert icc_challenge_hash is not None
                    assert sub_slot.proofs.infused_challenge_chain_slot_proof is not None
                    # 2f. Check infused challenge chain sub-slot VDF
                    # Only validate from prev_b to optimize
                    target_vdf_info = VDFInfo(
                        icc_challenge_hash,
                        icc_iters_proof,
                        sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.output,
                    )
                    if sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf != dataclasses.replace(
                        target_vdf_info,
                        number_of_iterations=icc_iters_committed,
                    ):
                        return None, ValidationError(Err.INVALID_ICC_EOS_VDF)
                    if not skip_vdf_is_valid:
                        # Normalized-to-identity proofs are verified from the default element over the
                        # full slot VDF; otherwise verify only the tail from prev_b's infusion.
                        if (
                            not sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
                            and not sub_slot.proofs.infused_challenge_chain_slot_proof.is_valid(
                                constants, icc_vdf_input, target_vdf_info, None
                            )
                        ):
                            return None, ValidationError(Err.INVALID_ICC_EOS_VDF)
                        if (
                            sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
                            and not sub_slot.proofs.infused_challenge_chain_slot_proof.is_valid(
                                constants,
                                ClassgroupElement.get_default_element(),
                                sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,
                            )
                        ):
                            return None, ValidationError(Err.INVALID_ICC_EOS_VDF)

                    if sub_slot.reward_chain.deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
                        # 2g. Check infused challenge sub-slot hash in challenge chain, deficit 16
                        if (
                            sub_slot.infused_challenge_chain.get_hash()
                            != sub_slot.challenge_chain.infused_challenge_chain_sub_slot_hash
                        ):
                            return None, ValidationError(Err.INVALID_ICC_HASH_CC)
                    else:
                        # 2h. Check infused challenge sub-slot hash not included for other deficits
                        if sub_slot.challenge_chain.infused_challenge_chain_sub_slot_hash is not None:
                            return None, ValidationError(Err.INVALID_ICC_HASH_CC)

                    # 2i. Check infused challenge sub-slot hash in reward sub-slot
                    if (
                        sub_slot.infused_challenge_chain.get_hash()
                        != sub_slot.reward_chain.infused_challenge_chain_sub_slot_hash
                    ):
                        return None, ValidationError(Err.INVALID_ICC_HASH_RC)
                else:
                    # 2j. If no icc, check that the cc doesn't include it
                    if sub_slot.challenge_chain.infused_challenge_chain_sub_slot_hash is not None:
                        return None, ValidationError(Err.INVALID_ICC_HASH_CC)

                    # 2k. If no icc, check that the cc doesn't include it
                    if sub_slot.reward_chain.infused_challenge_chain_sub_slot_hash is not None:
                        return None, ValidationError(Err.INVALID_ICC_HASH_RC)

            if sub_slot.challenge_chain.subepoch_summary_hash is not None:
                assert ses_hash is None  # Only one of the slots can have it
                ses_hash = sub_slot.challenge_chain.subepoch_summary_hash

            # 2l. check sub-epoch summary hash is None for empty slots
            if finished_sub_slot_n != 0:
                if sub_slot.challenge_chain.subepoch_summary_hash is not None:
                    return None, ValidationError(Err.INVALID_SUB_EPOCH_SUMMARY_HASH)

            if can_finish_epoch and sub_slot.challenge_chain.subepoch_summary_hash is not None:
                # 2m. Check new difficulty and ssi
                if sub_slot.challenge_chain.new_sub_slot_iters != expected_sub_slot_iters:
                    return None, ValidationError(Err.INVALID_NEW_SUB_SLOT_ITERS)
                if sub_slot.challenge_chain.new_difficulty != expected_difficulty:
                    return None, ValidationError(Err.INVALID_NEW_DIFFICULTY)
            else:
                # 2n. Check new difficulty and ssi are None if we don't finish epoch
                if sub_slot.challenge_chain.new_sub_slot_iters is not None:
                    return None, ValidationError(Err.INVALID_NEW_SUB_SLOT_ITERS)
                if sub_slot.challenge_chain.new_difficulty is not None:
                    return None, ValidationError(Err.INVALID_NEW_DIFFICULTY)

            # 2o. Check challenge sub-slot hash in reward sub-slot
            if sub_slot.challenge_chain.get_hash() != sub_slot.reward_chain.challenge_chain_sub_slot_hash:
                return (
                    None,
                    ValidationError(
                        Err.INVALID_CHALLENGE_SLOT_HASH_RC,
                        "sub-slot hash in reward sub-slot mismatch",
                    ),
                )

            eos_vdf_iters: uint64 = expected_sub_slot_iters
            cc_start_element: ClassgroupElement = ClassgroupElement.get_default_element()
            cc_eos_vdf_challenge: bytes32 = challenge_hash
            if genesis_block:
                if finished_sub_slot_n == 0:
                    # First block, one empty slot. prior_point is the initial challenge
                    rc_eos_vdf_challenge: bytes32 = constants.GENESIS_CHALLENGE
                    cc_eos_vdf_challenge = constants.GENESIS_CHALLENGE
                else:
                    # First block, but have at least two empty slots
                    rc_eos_vdf_challenge = header_block.finished_sub_slots[
                        finished_sub_slot_n - 1
                    ].reward_chain.get_hash()
            else:
                assert prev_b is not None
                if finished_sub_slot_n == 0:
                    # No empty slots, so the starting point of VDF is the last reward block. Uses
                    # the same IPS as the previous block, since it's the same slot
                    rc_eos_vdf_challenge = prev_b.reward_infusion_new_challenge
                    eos_vdf_iters = uint64(prev_b.sub_slot_iters - prev_b.ip_iters(constants))
                    cc_start_element = prev_b.challenge_vdf_output
                else:
                    # At least one empty slot, so use previous slot hash. IPS might change because it's a new slot
                    rc_eos_vdf_challenge = header_block.finished_sub_slots[
                        finished_sub_slot_n - 1
                    ].reward_chain.get_hash()

            # 2p. Check end of reward slot VDF
            target_vdf_info = VDFInfo(
                rc_eos_vdf_challenge,
                eos_vdf_iters,
                sub_slot.reward_chain.end_of_slot_vdf.output,
            )
            if not skip_vdf_is_valid and not sub_slot.proofs.reward_chain_slot_proof.is_valid(
                constants,
                ClassgroupElement.get_default_element(),
                sub_slot.reward_chain.end_of_slot_vdf,
                target_vdf_info,
            ):
                return None, ValidationError(Err.INVALID_RC_EOS_VDF)

            # 2q. Check challenge chain sub-slot VDF
            partial_cc_vdf_info = VDFInfo(
                cc_eos_vdf_challenge,
                eos_vdf_iters,
                sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.output,
            )
            if genesis_block:
                cc_eos_vdf_info_iters = constants.SUB_SLOT_ITERS_STARTING
            else:
                assert prev_b is not None
                if finished_sub_slot_n == 0:
                    cc_eos_vdf_info_iters = prev_b.sub_slot_iters
                else:
                    cc_eos_vdf_info_iters = expected_sub_slot_iters
            # Check that the modified data is correct
            if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf != dataclasses.replace(
                partial_cc_vdf_info,
                number_of_iterations=cc_eos_vdf_info_iters,
            ):
                return None, ValidationError(Err.INVALID_CC_EOS_VDF, "wrong challenge chain end of slot vdf")

            if not skip_vdf_is_valid:
                # Pass in None for target info since we are only checking the proof from the temporary point,
                # but the challenge_chain_end_of_slot_vdf actually starts from the start of slot (for light clients)
                if (
                    not sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity
                    and not sub_slot.proofs.challenge_chain_slot_proof.is_valid(
                        constants, cc_start_element, partial_cc_vdf_info, None
                    )
                ):
                    return None, ValidationError(Err.INVALID_CC_EOS_VDF)
                if (
                    sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity
                    and not sub_slot.proofs.challenge_chain_slot_proof.is_valid(
                        constants,
                        ClassgroupElement.get_default_element(),
                        sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf,
                    )
                ):
                    return None, ValidationError(Err.INVALID_CC_EOS_VDF)

            if genesis_block:
                # 2r. Check deficit (MIN_SUB.. deficit edge case for genesis block)
                if sub_slot.reward_chain.deficit != constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
                    return (
                        None,
                        ValidationError(
                            Err.INVALID_DEFICIT,
                            f"genesis, expected deficit {constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK}",
                        ),
                    )
            else:
                assert prev_b is not None
                if prev_b.deficit == 0:
                    # 2s. If prev sb had deficit 0, resets deficit to MIN_BLOCK_PER_CHALLENGE_BLOCK
                    if sub_slot.reward_chain.deficit != constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
                        # NOTE(review): this passes the constant as the log message itself — looks like a
                        # leftover debug call; confirm intent upstream.
                        log.error(
                            constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK,
                        )
                        return (
                            None,
                            ValidationError(
                                Err.INVALID_DEFICIT,
                                f"expected deficit {constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK}, saw "
                                f"{sub_slot.reward_chain.deficit}",
                            ),
                        )
                else:
                    # 2t. Otherwise, deficit stays the same at the slot ends, cannot reset until 0
                    if sub_slot.reward_chain.deficit != prev_b.deficit:
                        return None, ValidationError(Err.INVALID_DEFICIT, "deficit is wrong at slot end")

    # 3. Check sub-epoch summary
    # Note that the subepoch summary is the summary of the previous subepoch (not the one that just finished)
    if not skip_overflow_last_ss_validation:
        if ses_hash is not None:
            # 3a. Check that genesis block does not have sub-epoch summary
            if genesis_block:
                return (
                    None,
                    ValidationError(
                        Err.INVALID_SUB_EPOCH_SUMMARY_HASH,
                        "genesis with sub-epoch-summary hash",
                    ),
                )
            assert prev_b is not None

            # 3b. Check that we finished a slot and we finished a sub-epoch
            if not new_sub_slot or not can_finish_se:
                return (
                    None,
                    ValidationError(
                        Err.INVALID_SUB_EPOCH_SUMMARY_HASH,
                        f"new sub-slot: {new_sub_slot} finishes sub-epoch {can_finish_se}",
                    ),
                )

            # 3c. Check the actual sub-epoch is correct
            if check_sub_epoch_summary:
                expected_sub_epoch_summary = make_sub_epoch_summary(
                    constants,
                    blocks,
                    height,
                    blocks.block_record(prev_b.prev_hash),
                    expected_difficulty if can_finish_epoch else None,
                    expected_sub_slot_iters if can_finish_epoch else None,
                )
                expected_hash = expected_sub_epoch_summary.get_hash()
                if expected_hash != ses_hash:
                    log.error(f"{expected_sub_epoch_summary}")
                    return (
                        None,
                        ValidationError(
                            Err.INVALID_SUB_EPOCH_SUMMARY,
                            f"expected ses hash: {expected_hash} got {ses_hash} ",
                        ),
                    )
        elif new_sub_slot and not genesis_block:
            # 3d. Check that we don't have to include a sub-epoch summary
            if can_finish_se or can_finish_epoch:
                return (
                    None,
                    ValidationError(
                        Err.INVALID_SUB_EPOCH_SUMMARY,
                        "block finishes sub-epoch but ses-hash is None",
                    ),
                )

    # 4. Check if the number of blocks is less than the max
    if not new_sub_slot and not genesis_block:
        assert prev_b is not None
        num_blocks = 2  # This includes the current block and the prev block
        curr = prev_b
        while not curr.first_in_sub_slot:
            num_blocks += 1
            curr = blocks.block_record(curr.prev_hash)
        if num_blocks > constants.MAX_SUB_SLOT_BLOCKS:
            return None, ValidationError(Err.TOO_MANY_BLOCKS)

    # If block state is correct, we should always find a challenge here
    # This computes what the challenge should be for this block
    challenge = get_block_challenge(
        constants,
        header_block,
        blocks,
        genesis_block,
        overflow,
        skip_overflow_last_ss_validation,
    )

    # 5a. Check proof of space
    if challenge != header_block.reward_chain_block.pos_ss_cc_challenge_hash:
        log.error(f"Finished slots: {header_block.finished_sub_slots}")
        # NOTE(review): there is no separator between the SP index and "Prev:" in this
        # message — cosmetic, log output only.
        log.error(
            f"Data: {genesis_block} {overflow} {skip_overflow_last_ss_validation} {header_block.total_iters} "
            f"{header_block.reward_chain_block.signage_point_index}"
            f"Prev: {prev_b}"
        )
        log.error(f"Challenge {challenge} provided {header_block.reward_chain_block.pos_ss_cc_challenge_hash}")
        return None, ValidationError(Err.INVALID_CC_CHALLENGE)

    # 5b. Check proof of space
    if header_block.reward_chain_block.challenge_chain_sp_vdf is None:
        # Edge case of first sp (start of slot), where sp_iters == 0
        cc_sp_hash: bytes32 = challenge
    else:
        cc_sp_hash = header_block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash()

    q_str: Optional[bytes32] = header_block.reward_chain_block.proof_of_space.verify_and_get_quality_string(
        constants, challenge, cc_sp_hash
    )
    if q_str is None:
        return None, ValidationError(Err.INVALID_POSPACE)

    # 6. check signage point index
    # no need to check negative values as this is uint 8
    if header_block.reward_chain_block.signage_point_index >= constants.NUM_SPS_SUB_SLOT:
        return None, ValidationError(Err.INVALID_SP_INDEX)

    # Note that required iters might be from the previous slot (if we are in an overflow block)
    required_iters: uint64 = calculate_iterations_quality(
        constants.DIFFICULTY_CONSTANT_FACTOR,
        q_str,
        header_block.reward_chain_block.proof_of_space.size,
        expected_difficulty,
        cc_sp_hash,
    )

    # 7. check required iters
    if required_iters >= calculate_sp_interval_iters(constants, expected_sub_slot_iters):
        return None, ValidationError(Err.INVALID_REQUIRED_ITERS)

    # 8a. check signage point index 0 has no cc sp
    if (header_block.reward_chain_block.signage_point_index == 0) != (
        header_block.reward_chain_block.challenge_chain_sp_vdf is None
    ):
        return None, ValidationError(Err.INVALID_SP_INDEX)

    # 8b. check signage point index 0 has no rc sp
    if (header_block.reward_chain_block.signage_point_index == 0) != (
        header_block.reward_chain_block.reward_chain_sp_vdf is None
    ):
        return None, ValidationError(Err.INVALID_SP_INDEX)

    sp_iters: uint64 = calculate_sp_iters(
        constants,
        expected_sub_slot_iters,
        header_block.reward_chain_block.signage_point_index,
    )

    ip_iters: uint64 = calculate_ip_iters(
        constants,
        expected_sub_slot_iters,
        header_block.reward_chain_block.signage_point_index,
        required_iters,
    )
    if header_block.reward_chain_block.challenge_chain_sp_vdf is None:
        # Blocks with very low required iters are not overflow blocks
        assert not overflow

    # 9. Check no overflows in the first sub-slot of a new epoch
    # (although they are OK in the second sub-slot), this is important
    if overflow and can_finish_epoch:
        if finished_sub_slots_since_prev < 2:
            return None, ValidationError(Err.NO_OVERFLOWS_IN_FIRST_SUB_SLOT_NEW_EPOCH)

    # 10. Check total iters
    if genesis_block:
        total_iters: uint128 = uint128(expected_sub_slot_iters * finished_sub_slots_since_prev)
    else:
        assert prev_b is not None
        if new_sub_slot:
            total_iters = prev_b.total_iters
            # Add the rest of the slot of prev_b
            total_iters = uint128(total_iters + prev_b.sub_slot_iters - prev_b.ip_iters(constants))
            # Add other empty slots
            total_iters = uint128(total_iters + (expected_sub_slot_iters * (finished_sub_slots_since_prev - 1)))
        else:
            # Slot iters is guaranteed to be the same for header_block and prev_b
            # This takes the beginning of the slot, and adds ip_iters
            total_iters = uint128(prev_b.total_iters - prev_b.ip_iters(constants))
    total_iters = uint128(total_iters + ip_iters)
    if total_iters != header_block.reward_chain_block.total_iters:
        return (
            None,
            ValidationError(
                Err.INVALID_TOTAL_ITERS,
                f"expected {total_iters} got {header_block.reward_chain_block.total_iters}",
            ),
        )

    # Total iters of this block's signage point (one sub-slot earlier for overflow blocks).
    sp_total_iters: uint128 = uint128(total_iters - ip_iters + sp_iters - (expected_sub_slot_iters if overflow else 0))
    if overflow and skip_overflow_last_ss_validation:
        # Stand in for the unreleased final sub-slot so get_signage_point_vdf_info sees the
        # expected number of sub-slots; its contents are never validated here.
        dummy_vdf_info = VDFInfo(
            bytes32([0] * 32),
            uint64(1),
            ClassgroupElement.get_default_element(),
        )
        dummy_sub_slot = EndOfSubSlotBundle(
            ChallengeChainSubSlot(dummy_vdf_info, None, None, None, None),
            None,
            RewardChainSubSlot(dummy_vdf_info, bytes32([0] * 32), None, uint8(0)),
            SubSlotProofs(VDFProof(uint8(0), b"", False), None, VDFProof(uint8(0), b"", False)),
        )
        sub_slots_to_pass_in = header_block.finished_sub_slots + [dummy_sub_slot]
    else:
        sub_slots_to_pass_in = header_block.finished_sub_slots
    (
        cc_vdf_challenge,
        rc_vdf_challenge,
        cc_vdf_input,
        rc_vdf_input,
        cc_vdf_iters,
        rc_vdf_iters,
    ) = get_signage_point_vdf_info(
        constants,
        sub_slots_to_pass_in,
        overflow,
        prev_b,
        blocks,
        sp_total_iters,
        sp_iters,
    )

    # 11. Check reward chain sp proof
    if sp_iters != 0:
        assert (
            header_block.reward_chain_block.reward_chain_sp_vdf is not None
            and header_block.reward_chain_sp_proof is not None
        )
        target_vdf_info = VDFInfo(
            rc_vdf_challenge,
            rc_vdf_iters,
            header_block.reward_chain_block.reward_chain_sp_vdf.output,
        )
        if not skip_vdf_is_valid and not header_block.reward_chain_sp_proof.is_valid(
            constants,
            rc_vdf_input,
            header_block.reward_chain_block.reward_chain_sp_vdf,
            target_vdf_info,
        ):
            return None, ValidationError(Err.INVALID_RC_SP_VDF)
        rc_sp_hash = header_block.reward_chain_block.reward_chain_sp_vdf.output.get_hash()
    else:
        # Edge case of first sp (start of slot), where sp_iters == 0
        assert overflow is not None
        if header_block.reward_chain_block.reward_chain_sp_vdf is not None:
            return None, ValidationError(Err.INVALID_RC_SP_VDF)
        if new_sub_slot:
            rc_sp_hash = header_block.finished_sub_slots[-1].reward_chain.get_hash()
        else:
            if genesis_block:
                rc_sp_hash = constants.GENESIS_CHALLENGE
            else:
                assert prev_b is not None
                curr = prev_b
                while not curr.first_in_sub_slot:
                    curr = blocks.block_record(curr.prev_hash)
                assert curr.finished_reward_slot_hashes is not None
                rc_sp_hash = curr.finished_reward_slot_hashes[-1]

    # 12. Check reward chain sp signature
    if not AugSchemeMPL.verify(
        header_block.reward_chain_block.proof_of_space.plot_public_key,
        rc_sp_hash,
        header_block.reward_chain_block.reward_chain_sp_signature,
    ):
        return None, ValidationError(Err.INVALID_RC_SIGNATURE)

    # 13. Check cc sp vdf
    if sp_iters != 0:
        assert header_block.reward_chain_block.challenge_chain_sp_vdf is not None
        assert header_block.challenge_chain_sp_proof is not None
        target_vdf_info = VDFInfo(
            cc_vdf_challenge,
            cc_vdf_iters,
            header_block.reward_chain_block.challenge_chain_sp_vdf.output,
        )
        if header_block.reward_chain_block.challenge_chain_sp_vdf != dataclasses.replace(
            target_vdf_info,
            number_of_iterations=sp_iters,
        ):
            return None, ValidationError(Err.INVALID_CC_SP_VDF)
        if not skip_vdf_is_valid:
            if (
                not header_block.challenge_chain_sp_proof.normalized_to_identity
                and not header_block.challenge_chain_sp_proof.is_valid(constants, cc_vdf_input, target_vdf_info, None)
            ):
                return None, ValidationError(Err.INVALID_CC_SP_VDF)
            if (
                header_block.challenge_chain_sp_proof.normalized_to_identity
                and not header_block.challenge_chain_sp_proof.is_valid(
                    constants,
                    ClassgroupElement.get_default_element(),
                    header_block.reward_chain_block.challenge_chain_sp_vdf,
                )
            ):
                return None, ValidationError(Err.INVALID_CC_SP_VDF)
    else:
        assert overflow is not None
        if header_block.reward_chain_block.challenge_chain_sp_vdf is not None:
            return None, ValidationError(Err.INVALID_CC_SP_VDF)

    # 14. Check cc sp sig
    if not AugSchemeMPL.verify(
        header_block.reward_chain_block.proof_of_space.plot_public_key,
        cc_sp_hash,
        header_block.reward_chain_block.challenge_chain_sp_signature,
    ):
        return None, ValidationError(Err.INVALID_CC_SIGNATURE, "invalid cc sp sig")

    # 15. Check is_transaction_block
    if genesis_block:
        if header_block.foliage.foliage_transaction_block_hash is None:
            return None, ValidationError(Err.INVALID_IS_TRANSACTION_BLOCK, "invalid genesis")
    else:
        assert prev_b is not None
        # Finds the previous block
        curr = prev_b
        while not curr.is_transaction_block:
            curr = blocks.block_record(curr.prev_hash)

        # The first block to have an sp > the last tx block's infusion iters, is a tx block
        if overflow:
            our_sp_total_iters: uint128 = uint128(total_iters - ip_iters + sp_iters - expected_sub_slot_iters)
        else:
            our_sp_total_iters = uint128(total_iters - ip_iters + sp_iters)
        if (our_sp_total_iters > curr.total_iters) != (header_block.foliage.foliage_transaction_block_hash is not None):
            return None, ValidationError(Err.INVALID_IS_TRANSACTION_BLOCK)
        if (our_sp_total_iters > curr.total_iters) != (
            header_block.foliage.foliage_transaction_block_signature is not None
        ):
            return None, ValidationError(Err.INVALID_IS_TRANSACTION_BLOCK)

    # 16. Check foliage block signature by plot key
    if not AugSchemeMPL.verify(
        header_block.reward_chain_block.proof_of_space.plot_public_key,
        header_block.foliage.foliage_block_data.get_hash(),
        header_block.foliage.foliage_block_data_signature,
    ):
        return None, ValidationError(Err.INVALID_PLOT_SIGNATURE)

    # 17. Check foliage block signature by plot key
    if header_block.foliage.foliage_transaction_block_hash is not None:
        if not AugSchemeMPL.verify(
            header_block.reward_chain_block.proof_of_space.plot_public_key,
            header_block.foliage.foliage_transaction_block_hash,
            header_block.foliage.foliage_transaction_block_signature,
        ):
            return None, ValidationError(Err.INVALID_PLOT_SIGNATURE)

    # 18. Check unfinished reward chain block hash
    if (
        header_block.reward_chain_block.get_hash()
        != header_block.foliage.foliage_block_data.unfinished_reward_block_hash
    ):
        return None, ValidationError(Err.INVALID_URSB_HASH)

    # 19. Check pool target max height
    if (
        header_block.foliage.foliage_block_data.pool_target.max_height != 0
        and header_block.foliage.foliage_block_data.pool_target.max_height < height
    ):
        return None, ValidationError(Err.OLD_POOL_TARGET)

    # 20a. Check pre-farm puzzle hashes for genesis block.
    if genesis_block:
        if (
            header_block.foliage.foliage_block_data.pool_target.puzzle_hash
            != constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH
        ):
            log.error(f"Pool target {header_block.foliage.foliage_block_data.pool_target} hb {header_block}")
            return None, ValidationError(Err.INVALID_PREFARM)
        if (
            header_block.foliage.foliage_block_data.farmer_reward_puzzle_hash
            != constants.GENESIS_PRE_FARM_FARMER_PUZZLE_HASH
        ):
            return None, ValidationError(Err.INVALID_PREFARM)
    else:
        # 20b. If pospace has a pool pk, heck pool target signature. Should not check this for genesis block.
        if header_block.reward_chain_block.proof_of_space.pool_public_key is not None:
            assert header_block.reward_chain_block.proof_of_space.pool_contract_puzzle_hash is None
            if not AugSchemeMPL.verify(
                header_block.reward_chain_block.proof_of_space.pool_public_key,
                bytes(header_block.foliage.foliage_block_data.pool_target),
                header_block.foliage.foliage_block_data.pool_signature,
            ):
                return None, ValidationError(Err.INVALID_POOL_SIGNATURE)
        else:
            # 20c. Otherwise, the plot is associated with a contract puzzle hash, not a public key
            assert header_block.reward_chain_block.proof_of_space.pool_contract_puzzle_hash is not None
            if (
                header_block.foliage.foliage_block_data.pool_target.puzzle_hash
                != header_block.reward_chain_block.proof_of_space.pool_contract_puzzle_hash
            ):
                return None, ValidationError(Err.INVALID_POOL_TARGET)

    # 21. Check extension data if applicable. None for mainnet.
    # 22. Check if foliage block is present
    if (header_block.foliage.foliage_transaction_block_hash is not None) != (
        header_block.foliage_transaction_block is not None
    ):
        return None, ValidationError(Err.INVALID_FOLIAGE_BLOCK_PRESENCE)

    if (header_block.foliage.foliage_transaction_block_signature is not None) != (
        header_block.foliage_transaction_block is not None
    ):
        return None, ValidationError(Err.INVALID_FOLIAGE_BLOCK_PRESENCE)

    if header_block.foliage_transaction_block is not None:
        # 23. Check foliage block hash
        if header_block.foliage_transaction_block.get_hash() != header_block.foliage.foliage_transaction_block_hash:
            return None, ValidationError(Err.INVALID_FOLIAGE_BLOCK_HASH)

        if genesis_block:
            # 24a. Check prev block hash for genesis
            if header_block.foliage_transaction_block.prev_transaction_block_hash != constants.GENESIS_CHALLENGE:
                return None, ValidationError(Err.INVALID_PREV_BLOCK_HASH)
        else:
            assert prev_b is not None
            # 24b. Check prev block hash for non-genesis
            curr_b: BlockRecord = prev_b
            while not curr_b.is_transaction_block:
                curr_b = blocks.block_record(curr_b.prev_hash)
            if not header_block.foliage_transaction_block.prev_transaction_block_hash == curr_b.header_hash:
                log.error(
                    f"Prev BH: {header_block.foliage_transaction_block.prev_transaction_block_hash} "
                    f"{curr_b.header_hash} curr sb: {curr_b}"
                )
                return None, ValidationError(Err.INVALID_PREV_BLOCK_HASH)

        # 25. The filter hash in the Foliage Block must be the hash of the filter
        if check_filter:
            if header_block.foliage_transaction_block.filter_hash != std_hash(header_block.transactions_filter):
                return None, ValidationError(Err.INVALID_TRANSACTIONS_FILTER_HASH)

        # 26a. The timestamp in Foliage Block must not be over 5 minutes in the future
        if header_block.foliage_transaction_block.timestamp > int(time.time() + constants.MAX_FUTURE_TIME):
            return None, ValidationError(Err.TIMESTAMP_TOO_FAR_IN_FUTURE)

        if prev_b is not None:
            # 26b. The timestamp must be greater than the previous transaction block timestamp
            prev_transaction_b = blocks.block_record(header_block.foliage_transaction_block.prev_transaction_block_hash)
            assert prev_transaction_b.timestamp is not None
            if header_block.foliage_transaction_block.timestamp <= prev_transaction_b.timestamp:
                return None, ValidationError(Err.TIMESTAMP_TOO_FAR_IN_PAST)

    return required_iters, None  # Valid unfinished header block
def validate_finished_header_block(
    constants: ConsensusConstants,
    blocks: BlockchainInterface,
    header_block: HeaderBlock,
    check_filter: bool,
    expected_difficulty: uint64,
    expected_sub_slot_iters: uint64,
    check_sub_epoch_summary=True,
) -> Tuple[Optional[uint64], Optional[ValidationError]]:
    """
    Fully validates the header of a block. A header block is the same as a full block, but
    without transactions and transaction info. Returns (required_iters, error).
    """
    # Run all checks that apply to unfinished blocks first (signage point, proof of space,
    # foliage, sub-slot structure) by stripping the infusion-point data off this block.
    unfinished_header_block = UnfinishedHeaderBlock(
        header_block.finished_sub_slots,
        header_block.reward_chain_block.get_unfinished(),
        header_block.challenge_chain_sp_proof,
        header_block.reward_chain_sp_proof,
        header_block.foliage,
        header_block.foliage_transaction_block,
        header_block.transactions_filter,
    )
    required_iters, validate_unfinished_err = validate_unfinished_header_block(
        constants,
        blocks,
        unfinished_header_block,
        check_filter,
        expected_difficulty,
        expected_sub_slot_iters,
        False,  # skip_overflow_last_ss_validation: a finished block must carry all its sub slots
        check_sub_epoch_summary=check_sub_epoch_summary,
    )
    genesis_block = False
    if validate_unfinished_err is not None:
        return None, validate_unfinished_err
    assert required_iters is not None
    if header_block.height == 0:
        prev_b: Optional[BlockRecord] = None
        genesis_block = True
    else:
        prev_b = blocks.block_record(header_block.prev_header_hash)
    new_sub_slot: bool = len(header_block.finished_sub_slots) > 0
    ip_iters: uint64 = calculate_ip_iters(
        constants,
        expected_sub_slot_iters,
        header_block.reward_chain_block.signage_point_index,
        required_iters,
    )
    if not genesis_block:
        assert prev_b is not None
        # 27. Check block height
        if header_block.height != prev_b.height + 1:
            return None, ValidationError(Err.INVALID_HEIGHT)
        # 28. Check weight
        if header_block.weight != prev_b.weight + expected_difficulty:
            log.error(f"INVALID WEIGHT: {header_block} {prev_b} {expected_difficulty}")
            return None, ValidationError(Err.INVALID_WEIGHT)
    else:
        # 27b. Check genesis block height, weight, and prev block hash
        if header_block.height != uint32(0):
            return None, ValidationError(Err.INVALID_HEIGHT)
        if header_block.weight != constants.DIFFICULTY_STARTING:
            return None, ValidationError(Err.INVALID_WEIGHT)
        if header_block.prev_header_hash != constants.GENESIS_CHALLENGE:
            return None, ValidationError(Err.INVALID_PREV_BLOCK_HASH)
    # RC vdf challenge is taken from more recent of (slot start, prev_block)
    if genesis_block:
        cc_vdf_output = ClassgroupElement.get_default_element()
        ip_vdf_iters = ip_iters
        if new_sub_slot:
            rc_vdf_challenge = header_block.finished_sub_slots[-1].reward_chain.get_hash()
        else:
            rc_vdf_challenge = constants.GENESIS_CHALLENGE
    else:
        assert prev_b is not None
        if new_sub_slot:
            # slot start is more recent
            rc_vdf_challenge = header_block.finished_sub_slots[-1].reward_chain.get_hash()
            ip_vdf_iters = ip_iters
            cc_vdf_output = ClassgroupElement.get_default_element()
        else:
            # Prev sb is more recent: continue the running VDF from the previous block's infusion
            rc_vdf_challenge = prev_b.reward_infusion_new_challenge
            ip_vdf_iters = uint64(header_block.reward_chain_block.total_iters - prev_b.total_iters)
            cc_vdf_output = prev_b.challenge_vdf_output
    # 29. Check challenge chain infusion point VDF
    if new_sub_slot:
        cc_vdf_challenge = header_block.finished_sub_slots[-1].challenge_chain.get_hash()
    else:
        # Not first block in slot
        if genesis_block:
            # genesis block
            cc_vdf_challenge = constants.GENESIS_CHALLENGE
        else:
            assert prev_b is not None
            # Not genesis block, go back to first block in slot
            curr = prev_b
            while curr.finished_challenge_slot_hashes is None:
                curr = blocks.block_record(curr.prev_hash)
            cc_vdf_challenge = curr.finished_challenge_slot_hashes[-1]
    cc_target_vdf_info = VDFInfo(
        cc_vdf_challenge,
        ip_vdf_iters,
        header_block.reward_chain_block.challenge_chain_ip_vdf.output,
    )
    # The claimed CC IP VDF must match the target exactly, except that the iteration count
    # in the block is expressed as ip_iters (iters within the slot), not ip_vdf_iters.
    if header_block.reward_chain_block.challenge_chain_ip_vdf != dataclasses.replace(
        cc_target_vdf_info,
        number_of_iterations=ip_iters,
    ):
        expected = dataclasses.replace(
            cc_target_vdf_info,
            number_of_iterations=ip_iters,
        )
        log.error(f"{header_block.reward_chain_block.challenge_chain_ip_vdf }. expected {expected}")
        log.error(f"Block: {header_block}")
        return None, ValidationError(Err.INVALID_CC_IP_VDF)
    # Non-normalized proofs are verified against the running output; normalized-to-identity
    # proofs are verified from the default (identity) element instead.
    if (
        not header_block.challenge_chain_ip_proof.normalized_to_identity
        and not header_block.challenge_chain_ip_proof.is_valid(
            constants,
            cc_vdf_output,
            cc_target_vdf_info,
            None,
        )
    ):
        log.error(f"Did not validate, output {cc_vdf_output}")
        log.error(f"Block: {header_block}")
        return None, ValidationError(Err.INVALID_CC_IP_VDF)
    if (
        header_block.challenge_chain_ip_proof.normalized_to_identity
        and not header_block.challenge_chain_ip_proof.is_valid(
            constants,
            ClassgroupElement.get_default_element(),
            header_block.reward_chain_block.challenge_chain_ip_vdf,
        )
    ):
        return None, ValidationError(Err.INVALID_CC_IP_VDF)
    # 30. Check reward chain infusion point VDF
    rc_target_vdf_info = VDFInfo(
        rc_vdf_challenge,
        ip_vdf_iters,
        header_block.reward_chain_block.reward_chain_ip_vdf.output,
    )
    if not header_block.reward_chain_ip_proof.is_valid(
        constants,
        ClassgroupElement.get_default_element(),
        header_block.reward_chain_block.reward_chain_ip_vdf,
        rc_target_vdf_info,
    ):
        return None, ValidationError(Err.INVALID_RC_IP_VDF)
    # 31. Check infused challenge chain infusion point VDF
    if not genesis_block:
        overflow = is_overflow_block(constants, header_block.reward_chain_block.signage_point_index)
        deficit = calculate_deficit(
            constants,
            header_block.height,
            prev_b,
            overflow,
            len(header_block.finished_sub_slots),
        )
        if header_block.reward_chain_block.infused_challenge_chain_ip_vdf is None:
            # If we don't have an ICC chain, deficit must be 4 or 5
            if deficit < constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
                return None, ValidationError(Err.INVALID_ICC_VDF)
        else:
            assert header_block.infused_challenge_chain_ip_proof is not None
            # If we have an ICC chain, deficit must be 0, 1, 2 or 3
            if deficit >= constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
                return (
                    None,
                    ValidationError(
                        Err.INVALID_ICC_VDF,
                        f"icc vdf and deficit is bigger or equal to {constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1}",
                    ),
                )
            # Determine the ICC challenge and input, from either the last finished sub slot
            # or the most recent challenge block / finished ICC slot.
            if new_sub_slot:
                last_ss = header_block.finished_sub_slots[-1]
                assert last_ss.infused_challenge_chain is not None
                icc_vdf_challenge: bytes32 = last_ss.infused_challenge_chain.get_hash()
                icc_vdf_input: Optional[ClassgroupElement] = ClassgroupElement.get_default_element()
            else:
                assert prev_b is not None
                if prev_b.is_challenge_block(constants):
                    icc_vdf_input = ClassgroupElement.get_default_element()
                else:
                    icc_vdf_input = prev_b.infused_challenge_vdf_output
                curr = prev_b
                while curr.finished_infused_challenge_slot_hashes is None and not curr.is_challenge_block(constants):
                    curr = blocks.block_record(curr.prev_hash)
                if curr.is_challenge_block(constants):
                    icc_vdf_challenge = curr.challenge_block_info_hash
                else:
                    assert curr.finished_infused_challenge_slot_hashes is not None
                    icc_vdf_challenge = curr.finished_infused_challenge_slot_hashes[-1]
            icc_target_vdf_info = VDFInfo(
                icc_vdf_challenge,
                ip_vdf_iters,
                header_block.reward_chain_block.infused_challenge_chain_ip_vdf.output,
            )
            if icc_vdf_input is None or not header_block.infused_challenge_chain_ip_proof.is_valid(
                constants,
                icc_vdf_input,
                header_block.reward_chain_block.infused_challenge_chain_ip_vdf,
                icc_target_vdf_info,
            ):
                return None, ValidationError(Err.INVALID_ICC_VDF, "invalid icc proof")
    else:
        # Genesis must not carry an ICC proof.
        if header_block.infused_challenge_chain_ip_proof is not None:
            return None, ValidationError(Err.INVALID_ICC_VDF)
    # 32. Check reward block hash
    if header_block.foliage.reward_block_hash != header_block.reward_chain_block.get_hash():
        return None, ValidationError(Err.INVALID_REWARD_BLOCK_HASH)
    # 33. Check reward block is_transaction_block
    if (
        header_block.foliage.foliage_transaction_block_hash is not None
    ) != header_block.reward_chain_block.is_transaction_block:
        return None, ValidationError(Err.INVALID_FOLIAGE_BLOCK_PRESENCE)
    return required_iters, None
from typing import List, Optional
from salvia.consensus.block_record import BlockRecord
from salvia.consensus.blockchain_interface import BlockchainInterface
from salvia.consensus.constants import ConsensusConstants
from salvia.types.blockchain_format.classgroup import ClassgroupElement
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.end_of_slot_bundle import EndOfSubSlotBundle
from salvia.util.ints import uint64, uint128
def get_signage_point_vdf_info(
    constants: ConsensusConstants,
    finished_sub_slots: List[EndOfSubSlotBundle],
    overflow: bool,
    prev_b: Optional[BlockRecord],
    blocks: BlockchainInterface,
    sp_total_iters: uint128,
    sp_iters: uint64,
):
    """
    Returns the following information, for the VDF of the signage point at sp_total_iters.
    cc and rc challenge hash
    cc and rc input
    cc and rc iterations

    Returned as a 6-tuple:
        (cc_vdf_challenge, rc_vdf_challenge, cc_vdf_input, rc_vdf_input,
         cc_vdf_iters, rc_vdf_iters)
    Note the RC input is always the default element, and the cc/rc iteration
    counts are always equal (see the single return at the bottom).
    """
    new_sub_slot: bool = len(finished_sub_slots) > 0
    genesis_block: bool = prev_b is None
    if new_sub_slot and not overflow:
        # Case 1: start from start of this slot. Case of no overflow slots. Also includes genesis block after empty
        # slot(s), but not overflowing
        rc_vdf_challenge: bytes32 = finished_sub_slots[-1].reward_chain.get_hash()
        cc_vdf_challenge = finished_sub_slots[-1].challenge_chain.get_hash()
        sp_vdf_iters = sp_iters
        cc_vdf_input = ClassgroupElement.get_default_element()
    elif new_sub_slot and overflow and len(finished_sub_slots) > 1:
        # Case 2: start from start of prev slot. This is a rare case of empty prev slot. Includes genesis block after
        # 2 empty slots
        rc_vdf_challenge = finished_sub_slots[-2].reward_chain.get_hash()
        cc_vdf_challenge = finished_sub_slots[-2].challenge_chain.get_hash()
        sp_vdf_iters = sp_iters
        cc_vdf_input = ClassgroupElement.get_default_element()
    elif genesis_block:
        # Case 3: Genesis block case, first challenge
        rc_vdf_challenge = constants.GENESIS_CHALLENGE
        cc_vdf_challenge = constants.GENESIS_CHALLENGE
        sp_vdf_iters = sp_iters
        cc_vdf_input = ClassgroupElement.get_default_element()
    elif new_sub_slot and overflow and len(finished_sub_slots) == 1:
        # Case 4: Starting at prev will put us in the previous, sub-slot, since case 2 handled more empty slots
        assert prev_b is not None
        curr: BlockRecord = prev_b
        # Walk back to either the first block in the sub slot, or the last block infused
        # before the signage point.
        while not curr.first_in_sub_slot and curr.total_iters > sp_total_iters:
            curr = blocks.block_record(curr.prev_hash)
        if curr.total_iters < sp_total_iters:
            # curr was infused before the SP: continue its running VDF output.
            sp_vdf_iters = uint64(sp_total_iters - curr.total_iters)
            cc_vdf_input = curr.challenge_vdf_output
            rc_vdf_challenge = curr.reward_infusion_new_challenge
        else:
            # No block before the SP in this sub slot: start from the slot boundary.
            assert curr.finished_reward_slot_hashes is not None
            sp_vdf_iters = sp_iters
            cc_vdf_input = ClassgroupElement.get_default_element()
            rc_vdf_challenge = curr.finished_reward_slot_hashes[-1]
        # The CC challenge always comes from the start of the sub slot.
        while not curr.first_in_sub_slot:
            curr = blocks.block_record(curr.prev_hash)
        assert curr.finished_challenge_slot_hashes is not None
        cc_vdf_challenge = curr.finished_challenge_slot_hashes[-1]
    elif not new_sub_slot and overflow:
        # Case 5: prev is in the same sub slot and also overflow. Starting at prev does not skip any sub slots
        assert prev_b is not None
        curr = prev_b
        # Collects the last two finished slots
        if curr.first_in_sub_slot:
            assert curr.finished_challenge_slot_hashes is not None
            assert curr.finished_reward_slot_hashes is not None
            found_sub_slots = list(
                reversed(
                    list(
                        zip(
                            curr.finished_challenge_slot_hashes,
                            curr.finished_reward_slot_hashes,
                        )
                    )
                )
            )
        else:
            found_sub_slots = []
        # sp_pre_sb: the last block infused strictly before the signage point, if any.
        sp_pre_sb: Optional[BlockRecord] = None
        while len(found_sub_slots) < 2 and curr.height > 0:
            if sp_pre_sb is None and curr.total_iters < sp_total_iters:
                sp_pre_sb = curr
            curr = blocks.block_record(curr.prev_hash)
            if curr.first_in_sub_slot:
                assert curr.finished_challenge_slot_hashes is not None
                assert curr.finished_reward_slot_hashes is not None
                found_sub_slots += list(
                    reversed(
                        list(
                            zip(
                                curr.finished_challenge_slot_hashes,
                                curr.finished_reward_slot_hashes,
                            )
                        )
                    )
                )
        # Also consider the final block reached by the walk (e.g. genesis).
        if sp_pre_sb is None and curr.total_iters < sp_total_iters:
            sp_pre_sb = curr
        if sp_pre_sb is not None:
            sp_vdf_iters = uint64(sp_total_iters - sp_pre_sb.total_iters)
            cc_vdf_input = sp_pre_sb.challenge_vdf_output
            rc_vdf_challenge = sp_pre_sb.reward_infusion_new_challenge
        else:
            sp_vdf_iters = sp_iters
            cc_vdf_input = ClassgroupElement.get_default_element()
            rc_vdf_challenge = found_sub_slots[1][1]
        # found_sub_slots[1] is the second-to-last finished slot (overflow SP lives there).
        cc_vdf_challenge = found_sub_slots[1][0]
    elif not new_sub_slot and not overflow:
        # Case 6: prev is in the same sub slot. Starting at prev does not skip any sub slots. We do not need
        # to go back another sub slot, because it's not overflow, so the VDF to signage point is this sub-slot.
        assert prev_b is not None
        curr = prev_b
        while not curr.first_in_sub_slot and curr.total_iters > sp_total_iters:
            curr = blocks.block_record(curr.prev_hash)
        if curr.total_iters < sp_total_iters:
            sp_vdf_iters = uint64(sp_total_iters - curr.total_iters)
            cc_vdf_input = curr.challenge_vdf_output
            rc_vdf_challenge = curr.reward_infusion_new_challenge
        else:
            assert curr.finished_reward_slot_hashes is not None
            sp_vdf_iters = sp_iters
            cc_vdf_input = ClassgroupElement.get_default_element()
            rc_vdf_challenge = curr.finished_reward_slot_hashes[-1]
        while not curr.first_in_sub_slot:
            curr = blocks.block_record(curr.prev_hash)
        assert curr.finished_challenge_slot_hashes is not None
        cc_vdf_challenge = curr.finished_challenge_slot_hashes[-1]
    else:
        # All cases are handled above
        assert False
    return (
        cc_vdf_challenge,
        rc_vdf_challenge,
        cc_vdf_input,
        ClassgroupElement.get_default_element(),  # RC VDF input is always the identity element
        sp_vdf_iters,
        sp_vdf_iters,  # cc and rc iteration counts are intentionally the same
    )
from dataclasses import dataclass
from typing import List, Optional
from salvia.consensus.condition_costs import ConditionCost
from salvia.types.blockchain_format.program import SerializedProgram
from salvia.types.condition_opcodes import ConditionOpcode
from salvia.types.name_puzzle_condition import NPC
from salvia.util.ints import uint64, uint16
from salvia.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class NPCResult(Streamable):
    """Result of running a CLVM generator: an optional error code plus the parsed
    name/puzzle/condition list and the measured CLVM execution cost."""

    # Err code value when generator execution failed; None on success.
    error: Optional[uint16]
    # Parsed puzzle/condition results produced by the generator run.
    npc_list: List[NPC]
    clvm_cost: uint64  # CLVM cost only, cost of conditions and tx size is not included
def calculate_cost_of_program(program: SerializedProgram, npc_result: NPCResult, cost_per_byte: int) -> uint64:
    """
    This function calculates the total cost of either a block or a spendbundle.

    The total is the sum of:
      - the CLVM execution cost already measured in ``npc_result.clvm_cost``,
      - a fixed per-condition cost for every condition emitted by the program, and
      - ``cost_per_byte`` for every byte of the serialized program.

    Args:
        program: the serialized generator program whose raw size is charged.
        npc_result: result of running the generator (CLVM cost + per-coin conditions).
        cost_per_byte: consensus-defined cost charged per serialized byte.

    Returns:
        The total cost as a uint64.
    """
    # Per-condition costs keyed by opcode, replacing the original 15-branch if/elif
    # chain. Opcodes absent from this table are deliberately free: unknown
    # conditions are ignored in order to allow for future soft forks.
    condition_cost_table = {
        ConditionOpcode.AGG_SIG_UNSAFE: ConditionCost.AGG_SIG.value,
        ConditionOpcode.AGG_SIG_ME: ConditionCost.AGG_SIG.value,
        ConditionOpcode.CREATE_COIN: ConditionCost.CREATE_COIN.value,
        ConditionOpcode.ASSERT_SECONDS_ABSOLUTE: ConditionCost.ASSERT_SECONDS_ABSOLUTE.value,
        ConditionOpcode.ASSERT_SECONDS_RELATIVE: ConditionCost.ASSERT_SECONDS_RELATIVE.value,
        ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE: ConditionCost.ASSERT_HEIGHT_ABSOLUTE.value,
        ConditionOpcode.ASSERT_HEIGHT_RELATIVE: ConditionCost.ASSERT_HEIGHT_RELATIVE.value,
        ConditionOpcode.ASSERT_MY_COIN_ID: ConditionCost.ASSERT_MY_COIN_ID.value,
        ConditionOpcode.ASSERT_MY_PARENT_ID: ConditionCost.ASSERT_MY_PARENT_ID.value,
        ConditionOpcode.ASSERT_MY_PUZZLEHASH: ConditionCost.ASSERT_MY_PUZZLEHASH.value,
        ConditionOpcode.ASSERT_MY_AMOUNT: ConditionCost.ASSERT_MY_AMOUNT.value,
        ConditionOpcode.RESERVE_FEE: ConditionCost.RESERVE_FEE.value,
        ConditionOpcode.CREATE_COIN_ANNOUNCEMENT: ConditionCost.CREATE_COIN_ANNOUNCEMENT.value,
        ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT: ConditionCost.ASSERT_COIN_ANNOUNCEMENT.value,
        ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT: ConditionCost.CREATE_PUZZLE_ANNOUNCEMENT.value,
        ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT: ConditionCost.ASSERT_PUZZLE_ANNOUNCEMENT.value,
    }
    total_cost = 0
    total_cost += npc_result.clvm_cost
    # Add cost of conditions
    npc: NPC
    for npc in npc_result.npc_list:
        for condition, cvp_list in npc.condition_dict.items():
            # Dict lookup is behaviorally identical to the former `is` comparisons,
            # since enum members are singletons and hash/compare by identity.
            total_cost += len(cvp_list) * condition_cost_table.get(condition, 0)
    # Add raw size of the program
    total_cost += len(bytes(program)) * cost_per_byte
    return uint64(total_cost)
from typing import Optional
from salvia.consensus.block_record import BlockRecord
from salvia.consensus.constants import ConsensusConstants
from salvia.util.ints import uint8, uint32
def calculate_deficit(
    constants: ConsensusConstants,
    height: uint32,
    prev_b: Optional[BlockRecord],
    overflow: bool,
    num_finished_sub_slots: int,
) -> uint8:
    """
    Compute the deficit of the block being created at ``height``.

    Args:
        constants: consensus constants being used for this chain
        height: block height of the block that we care about
        prev_b: previous block (must be provided for any non-genesis height)
        overflow: whether or not this is an overflow block
        num_finished_sub_slots: the number of finished slots between infusion points of prev and current

    Returns:
        The new deficit as a uint8.
    """
    max_deficit = constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK

    if height == 0:
        # Genesis starts one below the maximum.
        return uint8(max_deficit - 1)

    assert prev_b is not None
    previous: uint8 = prev_b.deficit

    if previous == max_deficit:
        # The previous block reset the deficit, so it was an overflow block.
        # An overflow block still in the same sub-slot cannot lower it further.
        if overflow and num_finished_sub_slots == 0:
            return uint8(previous)
        # A new sub-slot (or a non-overflow block) decreases the deficit.
        return uint8(previous - 1)

    if previous == 0:
        if num_finished_sub_slots == 0:
            # Same sub-slot: the deficit stays exhausted.
            return uint8(0)
        if num_finished_sub_slots == 1 and overflow:
            # Exactly one finished slot and overflowing: full reset.
            return uint8(max_deficit)
        # Otherwise the reset already happened; start one below the maximum.
        return uint8(max_deficit - 1)

    # Deficit strictly between 0 and the maximum: simply count down.
    return uint8(previous - 1)
import logging
from typing import List, Union
from salvia.consensus.block_record import BlockRecord
from salvia.consensus.blockchain_interface import BlockchainInterface
from salvia.consensus.constants import ConsensusConstants
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.full_block import FullBlock
from salvia.types.header_block import HeaderBlock
from salvia.types.unfinished_block import UnfinishedBlock
from salvia.types.unfinished_header_block import UnfinishedHeaderBlock
from salvia.util.ints import uint64
log = logging.getLogger(__name__)
def final_eos_is_already_included(
    header_block: Union[UnfinishedHeaderBlock, UnfinishedBlock, HeaderBlock, FullBlock],
    blocks: BlockchainInterface,
    sub_slot_iters: uint64,
) -> bool:
    """
    Decide whether the sub slot missing from an overflow block was already
    included by an earlier block.

    Args:
        header_block: An overflow block, with potentially missing information about the new sub slot
        blocks: all blocks that have been included before header_block
        sub_slot_iters: sub_slot_iters at the header_block

    Returns:
        True iff the missing sub slot was already included in a previous block.
        False means the sub slot is not included yet, so it is this block's
        responsibility to include it.
    """
    if len(header_block.finished_sub_slots) > 0:
        # The block itself carries an empty sub slot, meaning the prev block is
        # two sub slots behind — nothing was pre-included.
        return False

    half_slot = sub_slot_iters // 2

    def _overflow_near_header(record) -> bool:
        # True iff `record` is an overflow block infused within half a sub slot
        # of header_block, i.e. in the same sub slot as header_block.
        return record.overflow and (header_block.total_iters - record.total_iters < half_slot)

    cursor = blocks.block_record(header_block.prev_header_hash)
    overflow_seen = _overflow_near_header(cursor)
    # Walk back to the start of the sub slot (or genesis), remembering whether
    # any nearby overflow block was encountered along the way.
    while not cursor.first_in_sub_slot and cursor.height != 0:
        if _overflow_near_header(cursor):
            overflow_seen = True
        cursor = blocks.block_record(cursor.prev_hash)

    # Another overflow block in header_block's slot implies the sub slot was
    # already included; otherwise header_block must include it itself.
    return cursor.first_in_sub_slot and overflow_seen
def get_block_challenge(
    constants: ConsensusConstants,
    header_block: Union[UnfinishedHeaderBlock, UnfinishedBlock, HeaderBlock, FullBlock],
    blocks: BlockchainInterface,
    genesis_block: bool,
    overflow: bool,
    skip_overflow_last_ss_validation: bool,
):
    """
    Return the challenge-chain challenge (bytes32) that header_block's proof of
    space must be based on. For overflow blocks the challenge comes from an
    earlier sub slot than the infusion point; whether the final (unfinished)
    sub slot may be omitted is controlled by skip_overflow_last_ss_validation.
    """
    if len(header_block.finished_sub_slots) > 0:
        if overflow:
            # New sub-slot with overflow block
            if skip_overflow_last_ss_validation:
                # In this case, we are missing the final sub-slot bundle (it's not finished yet), however
                # There is a whole empty slot before this block is infused
                challenge: bytes32 = header_block.finished_sub_slots[-1].challenge_chain.get_hash()
            else:
                # Use the challenge that started the last finished sub slot.
                challenge = header_block.finished_sub_slots[
                    -1
                ].challenge_chain.challenge_chain_end_of_slot_vdf.challenge
        else:
            # No overflow, new slot with a new challenge
            challenge = header_block.finished_sub_slots[-1].challenge_chain.get_hash()
    else:
        if genesis_block:
            challenge = constants.GENESIS_CHALLENGE
        else:
            if overflow:
                if skip_overflow_last_ss_validation:
                    # Overflow infusion without the new slot, so get the last challenge
                    challenges_to_look_for = 1
                else:
                    # Overflow infusion, so get the second to last challenge. skip_overflow_last_ss_validation is False,
                    # Which means no sub slots are omitted
                    challenges_to_look_for = 2
            else:
                challenges_to_look_for = 1
            # Walk backwards collecting finished-slot challenge hashes (most recent
            # first) until enough have been found or genesis is reached.
            reversed_challenge_hashes: List[bytes32] = []
            curr: BlockRecord = blocks.block_record(header_block.prev_header_hash)
            while len(reversed_challenge_hashes) < challenges_to_look_for:
                if curr.first_in_sub_slot:
                    assert curr.finished_challenge_slot_hashes is not None
                    reversed_challenge_hashes += reversed(curr.finished_challenge_slot_hashes)
                if curr.height == 0:
                    assert curr.finished_challenge_slot_hashes is not None
                    assert len(curr.finished_challenge_slot_hashes) > 0
                    break
                curr = blocks.block_record(curr.prev_hash)
            challenge = reversed_challenge_hashes[challenges_to_look_for - 1]
    return challenge
import asyncio
import logging
import traceback
from concurrent.futures.process import ProcessPoolExecutor
from dataclasses import dataclass
from typing import Dict, List, Optional, Sequence, Tuple, Union, Callable
from salvia.consensus.block_header_validation import validate_finished_header_block
from salvia.consensus.block_record import BlockRecord
from salvia.consensus.blockchain_interface import BlockchainInterface
from salvia.consensus.constants import ConsensusConstants
from salvia.consensus.cost_calculator import NPCResult
from salvia.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from salvia.consensus.full_block_to_block_record import block_to_block_record
from salvia.consensus.get_block_challenge import get_block_challenge
from salvia.consensus.pot_iterations import calculate_iterations_quality, is_overflow_block
from salvia.full_node.mempool_check_conditions import get_name_puzzle_conditions
from salvia.types.blockchain_format.coin import Coin
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from salvia.types.full_block import FullBlock
from salvia.types.generator_types import BlockGenerator
from salvia.types.header_block import HeaderBlock
from salvia.types.unfinished_block import UnfinishedBlock
from salvia.util.block_cache import BlockCache
from salvia.util.errors import Err, ValidationError
from salvia.util.generator_tools import get_block_header, tx_removals_and_additions
from salvia.util.ints import uint16, uint64, uint32
from salvia.util.streamable import Streamable, dataclass_from_dict, streamable
log = logging.getLogger(__name__)
@dataclass(frozen=True)
@streamable
class PreValidationResult(Streamable):
    """Serializable result of pre-validating a single block in a worker process."""

    error: Optional[uint16]  # Err code value; None when pre-validation succeeded
    required_iters: Optional[uint64]  # Iff error is None
    npc_result: Optional[NPCResult]  # Iff error is None and block is a transaction block
def batch_pre_validate_blocks(
    constants_dict: Dict,
    blocks_pickled: Dict[bytes, bytes],
    full_blocks_pickled: Optional[List[bytes]],
    header_blocks_pickled: Optional[List[bytes]],
    prev_transaction_generators: List[Optional[bytes]],
    npc_results: Dict[uint32, bytes],
    check_filter: bool,
    expected_difficulty: List[uint64],
    expected_sub_slot_iters: List[uint64],
) -> List[bytes]:
    """
    Pre-validate a batch of blocks inside a worker process. All inputs arrive as
    bytes so they can cross the process boundary; exactly one of
    ``full_blocks_pickled`` / ``header_blocks_pickled`` must be provided.

    Returns a list of serialized PreValidationResult, one per input block, in
    order. A per-block exception is converted to an Err.UNKNOWN result instead
    of aborting the batch.
    """
    blocks = {}
    for k, v in blocks_pickled.items():
        blocks[k] = BlockRecord.from_bytes(v)
    results: List[PreValidationResult] = []
    constants: ConsensusConstants = dataclass_from_dict(ConsensusConstants, constants_dict)
    if full_blocks_pickled is not None and header_blocks_pickled is not None:
        # Bug fix: this was `assert ValueError(...)`, which asserts a (truthy)
        # exception instance and therefore could never fire. Raise instead.
        raise ValueError("Only one should be passed here")
    if full_blocks_pickled is not None:
        for i in range(len(full_blocks_pickled)):
            try:
                block: FullBlock = FullBlock.from_bytes(full_blocks_pickled[i])
                tx_additions: List[Coin] = []
                removals: List[bytes32] = []
                npc_result: Optional[NPCResult] = None
                if block.height in npc_results:
                    # Conditions were already computed by the caller; reuse them.
                    npc_result = NPCResult.from_bytes(npc_results[block.height])
                    assert npc_result is not None
                    if npc_result.npc_list is not None:
                        removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
                    else:
                        removals, tx_additions = [], []
                if block.transactions_generator is not None and npc_result is None:
                    # Run the generator here to extract removals/additions.
                    prev_generator_bytes = prev_transaction_generators[i]
                    assert prev_generator_bytes is not None
                    assert block.transactions_info is not None
                    block_generator: BlockGenerator = BlockGenerator.from_bytes(prev_generator_bytes)
                    assert block_generator.program == block.transactions_generator
                    npc_result = get_name_puzzle_conditions(
                        block_generator,
                        min(constants.MAX_BLOCK_COST_CLVM, block.transactions_info.cost),
                        cost_per_byte=constants.COST_PER_BYTE,
                        safe_mode=True,
                    )
                    removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
                header_block = get_block_header(block, tx_additions, removals)
                required_iters, error = validate_finished_header_block(
                    constants,
                    BlockCache(blocks),
                    header_block,
                    check_filter,
                    expected_difficulty[i],
                    expected_sub_slot_iters[i],
                )
                error_int: Optional[uint16] = None
                if error is not None:
                    error_int = uint16(error.code.value)
                results.append(PreValidationResult(error_int, required_iters, npc_result))
            except Exception:
                error_stack = traceback.format_exc()
                log.error(f"Exception: {error_stack}")
                results.append(PreValidationResult(uint16(Err.UNKNOWN.value), None, None))
    elif header_blocks_pickled is not None:
        for i in range(len(header_blocks_pickled)):
            try:
                header_block = HeaderBlock.from_bytes(header_blocks_pickled[i])
                required_iters, error = validate_finished_header_block(
                    constants,
                    BlockCache(blocks),
                    header_block,
                    check_filter,
                    expected_difficulty[i],
                    expected_sub_slot_iters[i],
                )
                error_int = None
                if error is not None:
                    error_int = uint16(error.code.value)
                results.append(PreValidationResult(error_int, required_iters, None))
            except Exception:
                error_stack = traceback.format_exc()
                log.error(f"Exception: {error_stack}")
                results.append(PreValidationResult(uint16(Err.UNKNOWN.value), None, None))
    return [bytes(r) for r in results]
async def pre_validate_blocks_multiprocessing(
    constants: ConsensusConstants,
    constants_json: Dict,
    block_records: BlockchainInterface,
    blocks: Sequence[Union[FullBlock, HeaderBlock]],
    pool: ProcessPoolExecutor,
    check_filter: bool,
    npc_results: Dict[uint32, NPCResult],
    get_block_generator: Optional[Callable],
    batch_size: int,
    wp_summaries: Optional[List[SubEpochSummary]] = None,
) -> Optional[List[PreValidationResult]]:
    """
    This method must be called under the blockchain lock
    If all the full blocks pass pre-validation, (only validates header), returns the list of required iters.
    if any validation issue occurs, returns False.
    Args:
        check_filter:
        constants_json:
        pool:
        constants:
        block_records:
        blocks: list of full blocks to validate (must be connected to current chain)
        npc_results
        get_block_generator
    """
    prev_b: Optional[BlockRecord] = None
    # Collects all the recent blocks (up to the previous sub-epoch)
    recent_blocks: Dict[bytes32, BlockRecord] = {}
    recent_blocks_compressed: Dict[bytes32, BlockRecord] = {}
    num_sub_slots_found = 0
    num_blocks_seen = 0
    if blocks[0].height > 0:
        if not block_records.contains_block(blocks[0].prev_header_hash):
            return [PreValidationResult(uint16(Err.INVALID_PREV_BLOCK_HASH.value), None, None)]
        curr = block_records.block_record(blocks[0].prev_header_hash)
        num_sub_slots_to_look_for = 3 if curr.overflow else 2
        # Walk backwards collecting enough context (timestamps, sub slots, last
        # sub-epoch summary) for the workers to validate headers in isolation.
        while (
            curr.sub_epoch_summary_included is None
            or num_blocks_seen < constants.NUMBER_OF_TIMESTAMPS
            or num_sub_slots_found < num_sub_slots_to_look_for
        ) and curr.height > 0:
            if num_blocks_seen < constants.NUMBER_OF_TIMESTAMPS or num_sub_slots_found < num_sub_slots_to_look_for:
                recent_blocks_compressed[curr.header_hash] = curr
            if curr.first_in_sub_slot:
                assert curr.finished_challenge_slot_hashes is not None
                num_sub_slots_found += len(curr.finished_challenge_slot_hashes)
            recent_blocks[curr.header_hash] = curr
            if curr.is_transaction_block:
                num_blocks_seen += 1
            curr = block_records.block_record(curr.prev_hash)
        recent_blocks[curr.header_hash] = curr
        recent_blocks_compressed[curr.header_hash] = curr
    # Remember which blocks were already in block_records, so temporary
    # additions made below can be rolled back.
    block_record_was_present = []
    for block in blocks:
        block_record_was_present.append(block_records.contains_block(block.header_hash))
    diff_ssis: List[Tuple[uint64, uint64]] = []
    for block in blocks:
        if block.height != 0:
            assert block_records.contains_block(block.prev_header_hash)
            if prev_b is None:
                prev_b = block_records.block_record(block.prev_header_hash)
        sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
            constants, len(block.finished_sub_slots) > 0, prev_b, block_records
        )
        overflow = is_overflow_block(constants, block.reward_chain_block.signage_point_index)
        challenge = get_block_challenge(constants, block, BlockCache(recent_blocks), prev_b is None, overflow, False)
        if block.reward_chain_block.challenge_chain_sp_vdf is None:
            # Edge case of first sp (no VDF): the SP hash is the challenge itself.
            cc_sp_hash: bytes32 = challenge
        else:
            cc_sp_hash = block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash()
        q_str: Optional[bytes32] = block.reward_chain_block.proof_of_space.verify_and_get_quality_string(
            constants, challenge, cc_sp_hash
        )
        if q_str is None:
            # Invalid proof of space: roll back any temporary block records and abort.
            for i, block_i in enumerate(blocks):
                if not block_record_was_present[i] and block_records.contains_block(block_i.header_hash):
                    block_records.remove_block_record(block_i.header_hash)
            return None
        required_iters: uint64 = calculate_iterations_quality(
            constants.DIFFICULTY_CONSTANT_FACTOR,
            q_str,
            block.reward_chain_block.proof_of_space.size,
            difficulty,
            cc_sp_hash,
        )
        block_rec = block_to_block_record(
            constants,
            block_records,
            required_iters,
            block,
            None,
        )
        if block_rec.sub_epoch_summary_included is not None and wp_summaries is not None:
            # Cross-check this block's sub-epoch summary against the weight-proof list.
            idx = int(block.height / constants.SUB_EPOCH_BLOCKS) - 1
            next_ses = wp_summaries[idx]
            if not block_rec.sub_epoch_summary_included.get_hash() == next_ses.get_hash():
                log.error("sub_epoch_summary does not match wp sub_epoch_summary list")
                return None
        # Makes sure to not override the valid blocks already in block_records
        if not block_records.contains_block(block_rec.header_hash):
            block_records.add_block_record(block_rec)  # Temporarily add block to dict
            recent_blocks[block_rec.header_hash] = block_rec
            recent_blocks_compressed[block_rec.header_hash] = block_rec
        else:
            recent_blocks[block_rec.header_hash] = block_records.block_record(block_rec.header_hash)
            recent_blocks_compressed[block_rec.header_hash] = block_records.block_record(block_rec.header_hash)
        prev_b = block_rec
        diff_ssis.append((difficulty, sub_slot_iters))
    block_dict: Dict[bytes32, Union[FullBlock, HeaderBlock]] = {}
    for i, block in enumerate(blocks):
        block_dict[block.header_hash] = block
        if not block_record_was_present[i]:
            # Undo the temporary additions made above.
            block_records.remove_block_record(block.header_hash)
    recent_sb_compressed_pickled = {bytes(k): bytes(v) for k, v in recent_blocks_compressed.items()}
    npc_results_pickled = {}
    for k, v in npc_results.items():
        npc_results_pickled[k] = bytes(v)
    futures = []
    # Pool of workers to validate blocks concurrently
    for i in range(0, len(blocks), batch_size):
        end_i = min(i + batch_size, len(blocks))
        blocks_to_validate = blocks[i:end_i]
        # Batches containing a finished sub slot need the full (uncompressed) context.
        if any([len(block.finished_sub_slots) > 0 for block in blocks_to_validate]):
            final_pickled = {bytes(k): bytes(v) for k, v in recent_blocks.items()}
        else:
            final_pickled = recent_sb_compressed_pickled
        b_pickled: Optional[List[bytes]] = None
        hb_pickled: Optional[List[bytes]] = None
        previous_generators: List[Optional[bytes]] = []
        for block in blocks_to_validate:
            # We ONLY add blocks which are in the past, based on header hashes (which are validated later) to the
            # prev blocks dict. This is important since these blocks are assumed to be valid and are used as previous
            # generator references
            prev_blocks_dict: Dict[uint32, Union[FullBlock, HeaderBlock]] = {}
            curr_b: Union[FullBlock, HeaderBlock] = block
            while curr_b.prev_header_hash in block_dict:
                curr_b = block_dict[curr_b.prev_header_hash]
                prev_blocks_dict[curr_b.header_hash] = curr_b
            if isinstance(block, FullBlock):
                assert get_block_generator is not None
                if b_pickled is None:
                    b_pickled = []
                b_pickled.append(bytes(block))
                try:
                    block_generator: Optional[BlockGenerator] = await get_block_generator(block, prev_blocks_dict)
                except ValueError:
                    return None
                if block_generator is not None:
                    previous_generators.append(bytes(block_generator))
                else:
                    previous_generators.append(None)
            else:
                if hb_pickled is None:
                    hb_pickled = []
                hb_pickled.append(bytes(block))
        futures.append(
            asyncio.get_running_loop().run_in_executor(
                pool,
                batch_pre_validate_blocks,
                constants_json,
                final_pickled,
                b_pickled,
                hb_pickled,
                previous_generators,
                npc_results_pickled,
                check_filter,
                [diff_ssis[j][0] for j in range(i, end_i)],
                [diff_ssis[j][1] for j in range(i, end_i)],
            )
        )
    # Collect all results into one flat list
    return [
        PreValidationResult.from_bytes(result)
        for batch_result in (await asyncio.gather(*futures))
        for result in batch_result
    ]
def _run_generator(
    constants_dict: bytes,
    unfinished_block_bytes: bytes,
    block_generator_bytes: bytes,
) -> Tuple[Optional[Err], Optional[bytes]]:
    """
    Deserializes the inputs and runs the CLVM block generator. Intended to be
    invoked under a ProcessPoolExecutor so the heavy part of validating a block
    (the clvm program) runs in a separate process.

    Returns a (error, serialized NPCResult) pair: on success the error is None
    and the second element holds bytes(NPCResult); on failure the error code is
    returned and the second element is None.
    """
    try:
        consensus_constants: ConsensusConstants = dataclass_from_dict(ConsensusConstants, constants_dict)
        block: UnfinishedBlock = UnfinishedBlock.from_bytes(unfinished_block_bytes)
        assert block.transactions_info is not None
        generator: BlockGenerator = BlockGenerator.from_bytes(block_generator_bytes)
        # The generator we run must be the one the block declares.
        assert generator.program == block.transactions_generator
        # Cap execution cost at the lower of the consensus maximum and the
        # cost the block itself claims.
        cost_cap = min(consensus_constants.MAX_BLOCK_COST_CLVM, block.transactions_info.cost)
        result: NPCResult = get_name_puzzle_conditions(
            generator,
            cost_cap,
            cost_per_byte=consensus_constants.COST_PER_BYTE,
            safe_mode=False,
        )
        if result.error is not None:
            return Err(result.error), None
    except ValidationError as e:
        return e.code, None
    except Exception:
        # This runs in a worker process; collapse anything unexpected into a
        # generic error code rather than crashing the pool.
        return Err.UNKNOWN, None
    return None, bytes(result)
from typing import List, Optional, Union
from salvia.consensus.block_record import BlockRecord
from salvia.consensus.blockchain_interface import BlockchainInterface
from salvia.consensus.constants import ConsensusConstants
from salvia.consensus.deficit import calculate_deficit
from salvia.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from salvia.consensus.make_sub_epoch_summary import make_sub_epoch_summary
from salvia.consensus.pot_iterations import is_overflow_block
from salvia.types.blockchain_format.classgroup import ClassgroupElement
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.blockchain_format.slots import ChallengeBlockInfo
from salvia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from salvia.types.full_block import FullBlock
from salvia.types.header_block import HeaderBlock
from salvia.util.ints import uint8, uint32, uint64
def block_to_block_record(
    constants: ConsensusConstants,
    blocks: BlockchainInterface,
    required_iters: uint64,
    full_block: Optional[Union[FullBlock, HeaderBlock]],
    header_block: Optional[HeaderBlock],
) -> BlockRecord:
    """
    Builds the compact BlockRecord for an already-validated block.

    Exactly one of full_block / header_block is expected to be provided;
    whichever one is given supplies the header fields used below.

    Args:
        constants: consensus constants for this chain
        blocks: interface used to look up previously-stored block records
        required_iters: required iterations from the block's proof of space
        full_block: the full block, if available (may be None)
        header_block: the header block, used when full_block is None
    """
    # Pick whichever representation was supplied; both expose the fields used below.
    if full_block is None:
        assert header_block is not None
        block: Union[HeaderBlock, FullBlock] = header_block
    else:
        block = full_block
    prev_b = blocks.try_block_record(block.prev_header_hash)
    # Every non-genesis block must already have its parent in the store.
    if block.height > 0:
        assert prev_b is not None
    sub_slot_iters, _ = get_next_sub_slot_iters_and_difficulty(
        constants, len(block.finished_sub_slots) > 0, prev_b, blocks
    )
    overflow = is_overflow_block(constants, block.reward_chain_block.signage_point_index)
    deficit = calculate_deficit(
        constants,
        block.height,
        prev_b,
        overflow,
        len(block.finished_sub_slots),
    )
    # If any finished sub-slot embeds a sub-epoch summary hash, recompute the
    # summary and assert it matches the embedded hash. When several sub-slots
    # carry a hash, the last one found wins.
    found_ses_hash: Optional[bytes32] = None
    ses: Optional[SubEpochSummary] = None
    if len(block.finished_sub_slots) > 0:
        for sub_slot in block.finished_sub_slots:
            if sub_slot.challenge_chain.subepoch_summary_hash is not None:
                found_ses_hash = sub_slot.challenge_chain.subepoch_summary_hash
    if found_ses_hash:
        assert prev_b is not None
        assert len(block.finished_sub_slots) > 0
        ses = make_sub_epoch_summary(
            constants,
            blocks,
            block.height,
            blocks.block_record(prev_b.prev_hash),
            block.finished_sub_slots[0].challenge_chain.new_difficulty,
            block.finished_sub_slots[0].challenge_chain.new_sub_slot_iters,
        )
        assert ses.get_hash() == found_ses_hash
    # Walk back to the most recent transaction block to record its height
    # (defaults to 0 for genesis / when no such ancestor is found).
    prev_transaction_block_height = uint32(0)
    curr: Optional[BlockRecord] = blocks.try_block_record(block.prev_header_hash)
    while curr is not None and not curr.is_transaction_block:
        curr = blocks.try_block_record(curr.prev_hash)
    if curr is not None and curr.is_transaction_block:
        prev_transaction_block_height = curr.height
    return header_block_to_sub_block_record(
        constants,
        required_iters,
        block,
        sub_slot_iters,
        overflow,
        deficit,
        prev_transaction_block_height,
        ses,
    )
def header_block_to_sub_block_record(
    constants: ConsensusConstants,
    required_iters: uint64,
    block: Union[FullBlock, HeaderBlock],
    sub_slot_iters: uint64,
    overflow: bool,
    deficit: uint8,
    prev_transaction_block_height: uint32,
    ses: Optional[SubEpochSummary],
) -> BlockRecord:
    """
    Assembles a BlockRecord from a (full or header) block together with the
    consensus values already computed for it (required iterations, sub-slot
    iterations, deficit, overflow flag, sub-epoch summary).
    """
    # Fields that only exist on transaction blocks are left as None otherwise.
    tx_info = block.transactions_info
    reward_claims_incorporated = tx_info.reward_claims_incorporated if tx_info is not None else None
    fees = tx_info.fees if tx_info is not None else None

    cbi = ChallengeBlockInfo(
        block.reward_chain_block.proof_of_space,
        block.reward_chain_block.challenge_chain_sp_vdf,
        block.reward_chain_block.challenge_chain_sp_signature,
        block.reward_chain_block.challenge_chain_ip_vdf,
    )

    icc_vdf = block.reward_chain_block.infused_challenge_chain_ip_vdf
    icc_output: Optional[ClassgroupElement] = icc_vdf.output if icc_vdf is not None else None

    # Hashes of the finished sub-slots that precede this block. A genesis block
    # without finished sub-slots records the genesis challenge instead; any
    # other block without finished sub-slots records nothing.
    finished_challenge_slot_hashes: Optional[List[bytes32]] = None
    finished_reward_slot_hashes: Optional[List[bytes32]] = None
    finished_infused_challenge_slot_hashes: Optional[List[bytes32]] = None
    if len(block.finished_sub_slots) > 0:
        finished_challenge_slot_hashes = [ss.challenge_chain.get_hash() for ss in block.finished_sub_slots]
        finished_reward_slot_hashes = [ss.reward_chain.get_hash() for ss in block.finished_sub_slots]
        finished_infused_challenge_slot_hashes = [
            ss.infused_challenge_chain.get_hash()
            for ss in block.finished_sub_slots
            if ss.infused_challenge_chain is not None
        ]
    elif block.height == 0:
        finished_challenge_slot_hashes = [constants.GENESIS_CHALLENGE]
        finished_reward_slot_hashes = [constants.GENESIS_CHALLENGE]

    foliage_tx_block = block.foliage_transaction_block
    if foliage_tx_block is not None:
        prev_transaction_block_hash = foliage_tx_block.prev_transaction_block_hash
        timestamp = foliage_tx_block.timestamp
    else:
        prev_transaction_block_hash = None
        timestamp = None

    return BlockRecord(
        block.header_hash,
        block.prev_header_hash,
        block.height,
        block.weight,
        block.total_iters,
        block.reward_chain_block.signage_point_index,
        block.reward_chain_block.challenge_chain_ip_vdf.output,
        icc_output,
        block.reward_chain_block.get_hash(),
        cbi.get_hash(),
        sub_slot_iters,
        block.foliage.foliage_block_data.pool_target.puzzle_hash,
        block.foliage.foliage_block_data.farmer_reward_puzzle_hash,
        required_iters,
        deficit,
        overflow,
        prev_transaction_block_height,
        timestamp,
        prev_transaction_block_hash,
        fees,
        reward_claims_incorporated,
        finished_challenge_slot_hashes,
        finished_infused_challenge_slot_hashes,
        finished_reward_slot_hashes,
        ses,
    )
from salvia.util.ints import uint64
from .constants import ConsensusConstants
# Consensus constant overrides used to construct DEFAULT_CONSTANTS below.
# Individual networks (testnet0, mainnet, ...) override selected entries via config.
testnet_kwargs = {
    "SLOT_BLOCKS_TARGET": 32,  # Target number of blocks per sub-slot
    "MIN_BLOCKS_PER_CHALLENGE_BLOCK": 16,  # Must be less than half of SLOT_BLOCKS_TARGET
    "MAX_SUB_SLOT_BLOCKS": 128,  # Must be less than half of SUB_EPOCH_BLOCKS
    "NUM_SPS_SUB_SLOT": 64,  # Must be a power of 2
    "SUB_SLOT_ITERS_STARTING": 2 ** 27,  # Starting VDF iterations per sub-slot
    # DIFFICULTY_STARTING is the starting difficulty for the first epoch, which is then further
    # multiplied by another factor of DIFFICULTY_CONSTANT_FACTOR, to be used in the VDF iter calculation formula.
    "DIFFICULTY_CONSTANT_FACTOR": 2 ** 67,
    "DIFFICULTY_STARTING": 7,
    "DIFFICULTY_CHANGE_MAX_FACTOR": 3,  # The next difficulty is truncated to range [prev / FACTOR, prev * FACTOR]
    # These 3 constants must be changed at the same time
    "SUB_EPOCH_BLOCKS": 384,  # The number of blocks per sub-epoch, mainnet 384
    "EPOCH_BLOCKS": 4608,  # The number of blocks per epoch, mainnet 4608. Must be multiple of SUB_EPOCH_SB
    "SIGNIFICANT_BITS": 8,  # The number of bits to look at in difficulty and min iters. The rest are zeroed
    "DISCRIMINANT_SIZE_BITS": 1024,  # Max is 1024 (based on ClassGroupElement int size)
    "NUMBER_ZERO_BITS_PLOT_FILTER": 9,  # H(plot signature of the challenge) must start with these many zeroes
    "MIN_PLOT_SIZE": 32,  # 32 for mainnet
    "MAX_PLOT_SIZE": 50,
    "SUB_SLOT_TIME_TARGET": 600,  # The target number of seconds per slot, mainnet 600
    "NUM_SP_INTERVALS_EXTRA": 3,  # The number of sp intervals to add to the signage point
    "MAX_FUTURE_TIME": 5 * 60,  # The next block can have a timestamp of at most these many seconds in the future
    "NUMBER_OF_TIMESTAMPS": 11,  # Than the average of the last NUMBER_OF_TIMESTAMPS blocks
    # Used as the initial cc rc challenges, as well as first block back pointers, and first SES back pointer
    # We override this value based on the chain being run (testnet0, testnet1, mainnet, etc)
    # Default used for tests is std_hash(b'')
    "GENESIS_CHALLENGE": bytes.fromhex("57e70eb412169069cbc12183963aadb3d1b928f7248c3cc8d4db480a52027c84"),
    # Forks of salvia should change this value to provide replay attack protection. This is set to mainnet genesis chall
    "AGG_SIG_ME_ADDITIONAL_DATA": bytes.fromhex("cd712de5a54aa420613d90b4c0f7ad28de3ca1f3edef0ef3fe12d2721c067802"),
    "GENESIS_PRE_FARM_POOL_PUZZLE_HASH": bytes.fromhex(
        "86f83e6488b1ab859e3ce5beb6fca3dec3ee2f9b0e01d57605cc61da72aeb195"
    ),
    "GENESIS_PRE_FARM_FARMER_PUZZLE_HASH": bytes.fromhex(
        "d34204e34e9ca9440cfa9bf66d3bef14e954a78733e09d5b8e536060916588d3"
    ),
    "MAX_VDF_WITNESS_SIZE": 64,  # Maximum number of VDF proof witness elements
    # Size of mempool = 50x the size of block # temporary change until #9125 gets in
    "MEMPOOL_BLOCK_BUFFER": 10,
    # Max coin amount, fits into 64 bits
    "MAX_COIN_AMOUNT": uint64((1 << 64) - 1),
    # Max block cost in clvm cost units
    "MAX_BLOCK_COST_CLVM": 11000000000,
    # The cost per byte of generator program
    "COST_PER_BYTE": 12000,
    "WEIGHT_PROOF_THRESHOLD": 2,  # Minimum deficit for a challenge block used in weight proofs
    "BLOCKS_CACHE_SIZE": 4608 + (128 * 4),  # EPOCH_BLOCKS plus a margin of 4 max-size sub-slots
    "WEIGHT_PROOF_RECENT_BLOCKS": 1000,  # Number of recent blocks included in a weight proof
    "MAX_BLOCK_COUNT_PER_REQUESTS": 32,  # Allow up to 32 blocks per request
    "NETWORK_TYPE": 0,  # 0 = mainnet network type
    "MAX_GENERATOR_SIZE": 1000000,  # Maximum serialized size of a block generator program, in bytes
    "MAX_GENERATOR_REF_LIST_SIZE": 512,  # Number of references allowed in the block generator ref list
    "POOL_SUB_SLOT_ITERS": 37600000000,  # iters limit * NUM_SPS
}

# The default consensus constants object; built once at import time.
DEFAULT_CONSTANTS = ConsensusConstants(**testnet_kwargs)  # type: ignore
import logging
import random
from dataclasses import replace
from typing import Callable, Dict, List, Optional, Tuple
import blspy
from blspy import G1Element, G2Element
from chiabip158 import PyBIP158
from salvia.consensus.block_record import BlockRecord
from salvia.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from salvia.consensus.blockchain_interface import BlockchainInterface
from salvia.consensus.coinbase import create_farmer_coin, create_pool_coin
from salvia.consensus.constants import ConsensusConstants
from salvia.consensus.cost_calculator import NPCResult, calculate_cost_of_program
from salvia.full_node.mempool_check_conditions import get_name_puzzle_conditions
from salvia.full_node.signage_point import SignagePoint
from salvia.types.blockchain_format.coin import Coin, hash_coin_list
from salvia.types.blockchain_format.foliage import Foliage, FoliageBlockData, FoliageTransactionBlock, TransactionsInfo
from salvia.types.blockchain_format.pool_target import PoolTarget
from salvia.types.blockchain_format.proof_of_space import ProofOfSpace
from salvia.types.blockchain_format.reward_chain_block import RewardChainBlock, RewardChainBlockUnfinished
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.blockchain_format.vdf import VDFInfo, VDFProof
from salvia.types.end_of_slot_bundle import EndOfSubSlotBundle
from salvia.types.full_block import FullBlock
from salvia.types.generator_types import BlockGenerator
from salvia.types.unfinished_block import UnfinishedBlock
from salvia.util.hash import std_hash
from salvia.util.ints import uint8, uint32, uint64, uint128
from salvia.util.merkle_set import MerkleSet
from salvia.util.prev_transaction_block import get_prev_transaction_block
from salvia.util.recursive_replace import recursive_replace
log = logging.getLogger(__name__)
def create_foliage(
    constants: ConsensusConstants,
    reward_block_unfinished: RewardChainBlockUnfinished,
    block_generator: Optional[BlockGenerator],
    aggregate_sig: G2Element,
    additions: List[Coin],
    removals: List[Coin],
    prev_block: Optional[BlockRecord],
    blocks: BlockchainInterface,
    total_iters_sp: uint128,
    timestamp: uint64,
    farmer_reward_puzzlehash: bytes32,
    pool_target: PoolTarget,
    get_plot_signature: Callable[[bytes32, G1Element], G2Element],
    get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]],
    seed: bytes = b"",
) -> Tuple[Foliage, Optional[FoliageTransactionBlock], Optional[TransactionsInfo]]:
    """
    Creates a foliage for a given reward chain block. This may or may not be a tx block. In the case of a tx block,
    the return values are not None. This is called at the signage point, so some of this information may be
    tweaked at the infusion point.

    NOTE: for a transaction block, the reward claim coins are appended onto the
    caller-supplied `additions` list (the parameter is mutated in place).

    Args:
        constants: consensus constants being used for this chain
        reward_block_unfinished: the reward block to look at, potentially at the signage point
        block_generator: transactions to add to the foliage block, if created
        aggregate_sig: aggregate of all transctions (or infinity element)
        additions: coins added by the spend bundle (mutated: reward coins are appended)
        removals: coins removed by the spend bundle
        prev_block: the previous block at the signage point
        blocks: dict from header hash to blocks, of all ancestor blocks
        total_iters_sp: total iters at the signage point
        timestamp: timestamp to put into the foliage block
        farmer_reward_puzzlehash: where to pay out farming reward
        pool_target: where to pay out pool reward
        get_plot_signature: retrieve the signature corresponding to the plot public key
        get_pool_signature: retrieve the signature corresponding to the pool public key
        seed: seed to randomize block

    Returns:
        (foliage, foliage_transaction_block, transactions_info); the latter two
        are None when this block is not a transaction block.
    """
    if prev_block is not None:
        res = get_prev_transaction_block(prev_block, blocks, total_iters_sp)
        is_transaction_block: bool = res[0]
        prev_transaction_block: Optional[BlockRecord] = res[1]
    else:
        # Genesis is a transaction block
        prev_transaction_block = None
        is_transaction_block = True
    # Seed the (global) RNG so the extension data below is deterministic per seed.
    random.seed(seed)
    # Use the extension data to create different blocks based on header hash
    extension_data: bytes32 = random.randint(0, 100000000).to_bytes(32, "big")
    if prev_block is None:
        height: uint32 = uint32(0)
    else:
        height = uint32(prev_block.height + 1)

    # Create filter
    byte_array_tx: List[bytearray] = []
    tx_additions: List[Coin] = []
    tx_removals: List[bytes32] = []

    pool_target_signature: Optional[G2Element] = get_pool_signature(
        pool_target, reward_block_unfinished.proof_of_space.pool_public_key
    )

    foliage_data = FoliageBlockData(
        reward_block_unfinished.get_hash(),
        pool_target,
        pool_target_signature,
        farmer_reward_puzzlehash,
        extension_data,
    )

    foliage_block_data_signature: G2Element = get_plot_signature(
        foliage_data.get_hash(),
        reward_block_unfinished.proof_of_space.plot_public_key,
    )

    # Genesis points back at the genesis challenge instead of a real header hash.
    prev_block_hash: bytes32 = constants.GENESIS_CHALLENGE
    if height != 0:
        assert prev_block is not None
        prev_block_hash = prev_block.header_hash

    generator_block_heights_list: List[uint32] = []

    if is_transaction_block:
        cost = uint64(0)

        # Calculate the cost of transactions
        if block_generator is not None:
            generator_block_heights_list = block_generator.block_height_list()
            result: NPCResult = get_name_puzzle_conditions(
                block_generator,
                constants.MAX_BLOCK_COST_CLVM,
                cost_per_byte=constants.COST_PER_BYTE,
                safe_mode=True,
            )
            cost = calculate_cost_of_program(block_generator.program, result, constants.COST_PER_BYTE)

            # Fees are whatever value is removed but not re-created by the spend bundle.
            removal_amount = 0
            addition_amount = 0
            for coin in removals:
                removal_amount += coin.amount
            for coin in additions:
                addition_amount += coin.amount
            spend_bundle_fees = removal_amount - addition_amount
        else:
            spend_bundle_fees = 0

        reward_claims_incorporated = []
        if height > 0:
            assert prev_transaction_block is not None
            assert prev_block is not None
            curr: BlockRecord = prev_block
            # Walk back to the previous transaction block; its rewards (plus
            # its accumulated fees) are claimed in this block.
            while not curr.is_transaction_block:
                curr = blocks.block_record(curr.prev_hash)

            assert curr.fees is not None
            pool_coin = create_pool_coin(
                curr.height, curr.pool_puzzle_hash, calculate_pool_reward(curr.height), constants.GENESIS_CHALLENGE
            )

            farmer_coin = create_farmer_coin(
                curr.height,
                curr.farmer_puzzle_hash,
                uint64(calculate_base_farmer_reward(curr.height) + curr.fees),
                constants.GENESIS_CHALLENGE,
            )
            assert curr.header_hash == prev_transaction_block.header_hash
            reward_claims_incorporated += [pool_coin, farmer_coin]

            if curr.height > 0:
                curr = blocks.block_record(curr.prev_hash)
                # Prev block is not genesis
                # Also claim rewards for the non-transaction blocks between the
                # two most recent transaction blocks (no fees for those).
                while not curr.is_transaction_block:
                    pool_coin = create_pool_coin(
                        curr.height,
                        curr.pool_puzzle_hash,
                        calculate_pool_reward(curr.height),
                        constants.GENESIS_CHALLENGE,
                    )
                    farmer_coin = create_farmer_coin(
                        curr.height,
                        curr.farmer_puzzle_hash,
                        calculate_base_farmer_reward(curr.height),
                        constants.GENESIS_CHALLENGE,
                    )
                    reward_claims_incorporated += [pool_coin, farmer_coin]
                    curr = blocks.block_record(curr.prev_hash)
        additions.extend(reward_claims_incorporated.copy())
        for coin in additions:
            tx_additions.append(coin)
            byte_array_tx.append(bytearray(coin.puzzle_hash))
        for coin in removals:
            tx_removals.append(coin.name())
            byte_array_tx.append(bytearray(coin.name()))

        # BIP158-style filter over all touched puzzle hashes and coin ids.
        bip158: PyBIP158 = PyBIP158(byte_array_tx)
        encoded = bytes(bip158.GetEncoded())

        removal_merkle_set = MerkleSet()
        addition_merkle_set = MerkleSet()

        # Create removal Merkle set
        for coin_name in tx_removals:
            removal_merkle_set.add_already_hashed(coin_name)

        # Create addition Merkle set
        puzzlehash_coin_map: Dict[bytes32, List[Coin]] = {}

        for coin in tx_additions:
            if coin.puzzle_hash in puzzlehash_coin_map:
                puzzlehash_coin_map[coin.puzzle_hash].append(coin)
            else:
                puzzlehash_coin_map[coin.puzzle_hash] = [coin]

        # Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
        for puzzle, coins in puzzlehash_coin_map.items():
            addition_merkle_set.add_already_hashed(puzzle)
            addition_merkle_set.add_already_hashed(hash_coin_list(coins))

        additions_root = addition_merkle_set.get_root()
        removals_root = removal_merkle_set.get_root()

        # Sentinel hashes are used when there is no generator / no ref list.
        generator_hash = bytes32([0] * 32)
        if block_generator is not None:
            generator_hash = std_hash(block_generator.program)

        generator_refs_hash = bytes32([1] * 32)
        if generator_block_heights_list not in (None, []):
            generator_ref_list_bytes = b"".join([bytes(i) for i in generator_block_heights_list])
            generator_refs_hash = std_hash(generator_ref_list_bytes)

        filter_hash: bytes32 = std_hash(encoded)

        transactions_info: Optional[TransactionsInfo] = TransactionsInfo(
            generator_hash,
            generator_refs_hash,
            aggregate_sig,
            uint64(spend_bundle_fees),
            cost,
            reward_claims_incorporated,
        )
        if prev_transaction_block is None:
            prev_transaction_block_hash: bytes32 = constants.GENESIS_CHALLENGE
        else:
            prev_transaction_block_hash = prev_transaction_block.header_hash

        assert transactions_info is not None
        foliage_transaction_block: Optional[FoliageTransactionBlock] = FoliageTransactionBlock(
            prev_transaction_block_hash,
            timestamp,
            filter_hash,
            additions_root,
            removals_root,
            transactions_info.get_hash(),
        )
        assert foliage_transaction_block is not None

        foliage_transaction_block_hash: Optional[bytes32] = foliage_transaction_block.get_hash()
        foliage_transaction_block_signature: Optional[G2Element] = get_plot_signature(
            foliage_transaction_block_hash, reward_block_unfinished.proof_of_space.plot_public_key
        )
        assert foliage_transaction_block_signature is not None
    else:
        # Non-transaction block: all tx-related foliage fields stay empty.
        foliage_transaction_block_hash = None
        foliage_transaction_block_signature = None
        foliage_transaction_block = None
        transactions_info = None
    assert (foliage_transaction_block_hash is None) == (foliage_transaction_block_signature is None)

    foliage = Foliage(
        prev_block_hash,
        reward_block_unfinished.get_hash(),
        foliage_data,
        foliage_block_data_signature,
        foliage_transaction_block_hash,
        foliage_transaction_block_signature,
    )

    return foliage, foliage_transaction_block, transactions_info
def create_unfinished_block(
    constants: ConsensusConstants,
    sub_slot_start_total_iters: uint128,
    sub_slot_iters: uint64,
    signage_point_index: uint8,
    sp_iters: uint64,
    ip_iters: uint64,
    proof_of_space: ProofOfSpace,
    slot_cc_challenge: bytes32,
    farmer_reward_puzzle_hash: bytes32,
    pool_target: PoolTarget,
    get_plot_signature: Callable[[bytes32, G1Element], G2Element],
    get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]],
    signage_point: SignagePoint,
    timestamp: uint64,
    blocks: BlockchainInterface,
    seed: bytes = b"",
    block_generator: Optional[BlockGenerator] = None,
    aggregate_sig: G2Element = G2Element(),
    additions: Optional[List[Coin]] = None,
    removals: Optional[List[Coin]] = None,
    prev_block: Optional[BlockRecord] = None,
    finished_sub_slots_input: Optional[List[EndOfSubSlotBundle]] = None,
) -> UnfinishedBlock:
    """
    Creates a new unfinished block using all the information available at the signage point. This will have to be
    modified using information from the infusion point.

    Args:
        constants: consensus constants being used for this chain
        sub_slot_start_total_iters: the starting sub-slot iters at the signage point sub-slot
        sub_slot_iters: sub-slot-iters at the infusion point epoch
        signage_point_index: signage point index of the block to create
        sp_iters: sp_iters of the block to create
        ip_iters: ip_iters of the block to create
        proof_of_space: proof of space of the block to create
        slot_cc_challenge: challenge hash at the sp sub-slot
        farmer_reward_puzzle_hash: where to pay out farmer rewards
        pool_target: where to pay out pool rewards
        get_plot_signature: function that returns signature corresponding to plot public key
        get_pool_signature: function that returns signature corresponding to pool public key
        signage_point: signage point information (VDFs)
        timestamp: timestamp to add to the foliage block, if created
        seed: seed to randomize chain
        block_generator: transactions to add to the foliage block, if created
        aggregate_sig: aggregate of all transctions (or infinity element)
        additions: Coins added in spend_bundle
        removals: Coins removed in spend_bundle
        prev_block: previous block (already in chain) from the signage point
        blocks: dictionary from header hash to SBR of all included SBR
        finished_sub_slots_input: finished_sub_slots at the signage point

    Returns:
        the assembled UnfinishedBlock (reward chain block, proofs and foliage).
    """
    if finished_sub_slots_input is None:
        finished_sub_slots: List[EndOfSubSlotBundle] = []
    else:
        # Copy so the caller's list is not mutated.
        finished_sub_slots = finished_sub_slots_input.copy()

    # An overflow block infuses into the next sub-slot (sp comes after ip).
    overflow: bool = sp_iters > ip_iters
    total_iters_sp: uint128 = uint128(sub_slot_start_total_iters + sp_iters)
    is_genesis: bool = prev_block is None

    new_sub_slot: bool = len(finished_sub_slots) > 0

    cc_sp_hash: Optional[bytes32] = slot_cc_challenge

    # Derive the cc/rc signage-point hashes to sign.
    # Only enters this if statement if we are in testing mode (making VDF proofs here)
    if signage_point.cc_vdf is not None:
        assert signage_point.rc_vdf is not None
        cc_sp_hash = signage_point.cc_vdf.output.get_hash()
        rc_sp_hash = signage_point.rc_vdf.output.get_hash()
    else:
        if new_sub_slot:
            rc_sp_hash = finished_sub_slots[-1].reward_chain.get_hash()
        else:
            if is_genesis:
                rc_sp_hash = constants.GENESIS_CHALLENGE
            else:
                assert prev_block is not None
                assert blocks is not None
                curr = prev_block
                # Walk back to the first block in this sub-slot to find the
                # last finished reward slot hash.
                while not curr.first_in_sub_slot:
                    curr = blocks.block_record(curr.prev_hash)
                assert curr.finished_reward_slot_hashes is not None
                rc_sp_hash = curr.finished_reward_slot_hashes[-1]
        signage_point = SignagePoint(None, None, None, None)

    cc_sp_signature: Optional[G2Element] = get_plot_signature(cc_sp_hash, proof_of_space.plot_public_key)
    rc_sp_signature: Optional[G2Element] = get_plot_signature(rc_sp_hash, proof_of_space.plot_public_key)
    assert cc_sp_signature is not None
    assert rc_sp_signature is not None
    assert blspy.AugSchemeMPL.verify(proof_of_space.plot_public_key, cc_sp_hash, cc_sp_signature)

    # Overflow blocks count an extra sub-slot worth of iterations.
    total_iters = uint128(sub_slot_start_total_iters + ip_iters + (sub_slot_iters if overflow else 0))

    rc_block = RewardChainBlockUnfinished(
        total_iters,
        signage_point_index,
        slot_cc_challenge,
        proof_of_space,
        signage_point.cc_vdf,
        cc_sp_signature,
        signage_point.rc_vdf,
        rc_sp_signature,
    )
    if additions is None:
        additions = []
    if removals is None:
        removals = []
    (foliage, foliage_transaction_block, transactions_info,) = create_foliage(
        constants,
        rc_block,
        block_generator,
        aggregate_sig,
        additions,
        removals,
        prev_block,
        blocks,
        total_iters_sp,
        timestamp,
        farmer_reward_puzzle_hash,
        pool_target,
        get_plot_signature,
        get_pool_signature,
        seed,
    )
    return UnfinishedBlock(
        finished_sub_slots,
        rc_block,
        signage_point.cc_proof,
        signage_point.rc_proof,
        foliage,
        foliage_transaction_block,
        transactions_info,
        block_generator.program if block_generator else None,
        block_generator.block_height_list() if block_generator else [],
    )
def unfinished_block_to_full_block(
    unfinished_block: UnfinishedBlock,
    cc_ip_vdf: VDFInfo,
    cc_ip_proof: VDFProof,
    rc_ip_vdf: VDFInfo,
    rc_ip_proof: VDFProof,
    icc_ip_vdf: Optional[VDFInfo],
    icc_ip_proof: Optional[VDFProof],
    finished_sub_slots: List[EndOfSubSlotBundle],
    prev_block: Optional[BlockRecord],
    blocks: BlockchainInterface,
    total_iters_sp: uint128,
    difficulty: uint64,
) -> FullBlock:
    """
    Converts an unfinished block to a finished block. Includes all the infusion point VDFs as well as tweaking
    other properties (height, weight, sub-slots, etc)

    Args:
        unfinished_block: the unfinished block to finish
        cc_ip_vdf: the challenge chain vdf info at the infusion point
        cc_ip_proof: the challenge chain proof
        rc_ip_vdf: the reward chain vdf info at the infusion point
        rc_ip_proof: the reward chain proof
        icc_ip_vdf: the infused challenge chain vdf info at the infusion point
        icc_ip_proof: the infused challenge chain proof
        finished_sub_slots: finished sub slots from the prev block to the infusion point
        prev_block: prev block from the infusion point
        blocks: dictionary from header hash to SBR of all included SBR
        total_iters_sp: total iters at the signage point
        difficulty: difficulty at the infusion point

    Returns:
        the finished FullBlock, with the foliage's reward_block_hash updated to
        match the final reward chain block.
    """
    # Replace things that need to be replaced, since foliage blocks did not necessarily have the latest information
    if prev_block is None:
        # Genesis: always a transaction block, weight equals the difficulty.
        is_transaction_block = True
        new_weight = uint128(difficulty)
        new_height = uint32(0)
        new_foliage = unfinished_block.foliage
        new_foliage_transaction_block = unfinished_block.foliage_transaction_block
        new_tx_info = unfinished_block.transactions_info
        new_generator = unfinished_block.transactions_generator
        new_generator_ref_list = unfinished_block.transactions_generator_ref_list
    else:
        is_transaction_block, _ = get_prev_transaction_block(prev_block, blocks, total_iters_sp)
        new_weight = uint128(prev_block.weight + difficulty)
        new_height = uint32(prev_block.height + 1)
        # Keep the transaction-related fields only if this ends up being a
        # transaction block; otherwise strip them from the foliage.
        if is_transaction_block:
            new_fbh = unfinished_block.foliage.foliage_transaction_block_hash
            new_fbs = unfinished_block.foliage.foliage_transaction_block_signature
            new_foliage_transaction_block = unfinished_block.foliage_transaction_block
            new_tx_info = unfinished_block.transactions_info
            new_generator = unfinished_block.transactions_generator
            new_generator_ref_list = unfinished_block.transactions_generator_ref_list
        else:
            new_fbh = None
            new_fbs = None
            new_foliage_transaction_block = None
            new_tx_info = None
            new_generator = None
            new_generator_ref_list = []
        assert (new_fbh is None) == (new_fbs is None)
        new_foliage = replace(
            unfinished_block.foliage,
            prev_block_hash=prev_block.header_hash,
            foliage_transaction_block_hash=new_fbh,
            foliage_transaction_block_signature=new_fbs,
        )
    ret = FullBlock(
        finished_sub_slots,
        RewardChainBlock(
            new_weight,
            new_height,
            unfinished_block.reward_chain_block.total_iters,
            unfinished_block.reward_chain_block.signage_point_index,
            unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash,
            unfinished_block.reward_chain_block.proof_of_space,
            unfinished_block.reward_chain_block.challenge_chain_sp_vdf,
            unfinished_block.reward_chain_block.challenge_chain_sp_signature,
            cc_ip_vdf,
            unfinished_block.reward_chain_block.reward_chain_sp_vdf,
            unfinished_block.reward_chain_block.reward_chain_sp_signature,
            rc_ip_vdf,
            icc_ip_vdf,
            is_transaction_block,
        ),
        unfinished_block.challenge_chain_sp_proof,
        cc_ip_proof,
        unfinished_block.reward_chain_sp_proof,
        rc_ip_proof,
        icc_ip_proof,
        new_foliage,
        new_foliage_transaction_block,
        new_tx_info,
        new_generator,
        new_generator_ref_list,
    )
    # The foliage commits to the reward chain block, which was just rebuilt, so
    # re-point foliage.reward_block_hash at the final reward chain block hash.
    return recursive_replace(
        ret,
        "foliage.reward_block_hash",
        ret.reward_chain_block.get_hash(),
    )
import asyncio
import dataclasses
import logging
import multiprocessing
from concurrent.futures.process import ProcessPoolExecutor
from enum import Enum
from typing import Dict, List, Optional, Set, Tuple, Union
from clvm.casts import int_from_bytes
from salvia.consensus.block_body_validation import validate_block_body
from salvia.consensus.block_header_validation import validate_finished_header_block, validate_unfinished_header_block
from salvia.consensus.block_record import BlockRecord
from salvia.consensus.blockchain_interface import BlockchainInterface
from salvia.consensus.constants import ConsensusConstants
from salvia.consensus.cost_calculator import NPCResult
from salvia.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from salvia.consensus.find_fork_point import find_fork_point_in_chain
from salvia.consensus.full_block_to_block_record import block_to_block_record
from salvia.consensus.multiprocess_validation import (
PreValidationResult,
pre_validate_blocks_multiprocessing,
_run_generator,
)
from salvia.full_node.block_store import BlockStore
from salvia.full_node.coin_store import CoinStore
from salvia.full_node.hint_store import HintStore
from salvia.full_node.mempool_check_conditions import get_name_puzzle_conditions
from salvia.types.blockchain_format.coin import Coin
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from salvia.types.blockchain_format.vdf import VDFInfo
from salvia.types.coin_record import CoinRecord
from salvia.types.condition_opcodes import ConditionOpcode
from salvia.types.end_of_slot_bundle import EndOfSubSlotBundle
from salvia.types.full_block import FullBlock
from salvia.types.generator_types import BlockGenerator, GeneratorArg
from salvia.types.header_block import HeaderBlock
from salvia.types.unfinished_block import UnfinishedBlock
from salvia.types.unfinished_header_block import UnfinishedHeaderBlock
from salvia.types.weight_proof import SubEpochChallengeSegment
from salvia.util.errors import Err, ConsensusError
from salvia.util.generator_tools import get_block_header, tx_removals_and_additions
from salvia.util.ints import uint16, uint32, uint64, uint128
from salvia.util.streamable import recurse_jsonify
log = logging.getLogger(__name__)
class ReceiveBlockResult(Enum):
    """
    Outcome of Blockchain.receive_block(b): whether the block was accepted,
    and — when it was not added to the peak — the reason why.
    """

    NEW_PEAK = 1  # Accepted and now the peak of the blockchain
    ADDED_AS_ORPHAN = 2  # Accepted, but as an orphan/stale block (peak unchanged)
    INVALID_BLOCK = 3  # Rejected: failed validation
    ALREADY_HAVE_BLOCK = 4  # Ignored: already present in this blockchain
    DISCONNECTED_BLOCK = 5  # Rejected: parent (previous pointer) is unknown
class Blockchain(BlockchainInterface):
    """
    Holds the consensus state of the chain: an in-memory cache of block records, the
    genesis-to-peak path, coin/block/hint stores, and a process pool used for parallel
    block validation. Constructed via the async `create` factory, not `__init__`.
    """

    constants: ConsensusConstants
    # JSON-serializable form of `constants`, passed to worker processes
    constants_json: Dict
    # peak of the blockchain
    _peak_height: Optional[uint32]
    # All blocks in peak path are guaranteed to be included, can include orphan blocks
    __block_records: Dict[bytes32, BlockRecord]
    # all hashes of blocks in block_record by height, used for garbage collection
    __heights_in_cache: Dict[uint32, Set[bytes32]]
    # Defines the path from genesis to the peak, no orphan blocks
    __height_to_hash: Dict[uint32, bytes32]
    # All sub-epoch summaries that have been included in the blockchain from the beginning until and including the peak
    # (height_included, SubEpochSummary). Note: ONLY for the blocks in the path to the peak
    # NOTE(review): class-level mutable default is shared until _load_chain_from_store
    # assigns an instance dict in `create`.
    __sub_epoch_summaries: Dict[uint32, SubEpochSummary] = {}
    # Unspent Store
    coin_store: CoinStore
    # Store
    block_store: BlockStore
    # Used to verify blocks in parallel
    pool: ProcessPoolExecutor
    # Set holding seen compact proofs, in order to avoid duplicates.
    _seen_compact_proofs: Set[Tuple[VDFInfo, uint32]]
    # Whether blockchain is shut down or not
    _shut_down: bool
    # Lock to prevent simultaneous reads and writes
    lock: asyncio.Lock
    compact_proof_lock: asyncio.Lock
    hint_store: HintStore
    @staticmethod
    async def create(
        coin_store: CoinStore, block_store: BlockStore, consensus_constants: ConsensusConstants, hint_store: HintStore
    ):
        """
        Initializes a blockchain with the BlockRecords from disk, assuming they have all been
        validated. Uses the genesis block given in override_constants, or as a fallback,
        in the consensus constants config.
        """
        # Blockchain defines no __init__; all attributes are populated here.
        self = Blockchain()
        self.lock = asyncio.Lock()  # External lock handled by full node
        self.compact_proof_lock = asyncio.Lock()
        cpu_count = multiprocessing.cpu_count()
        if cpu_count > 61:
            cpu_count = 61  # Windows Server 2016 has an issue https://bugs.python.org/issue26903
        # Leave two cores free for the main process and I/O, but always keep at least one worker.
        num_workers = max(cpu_count - 2, 1)
        self.pool = ProcessPoolExecutor(max_workers=num_workers)
        log.info(f"Started {num_workers} processes for block validation")
        self.constants = consensus_constants
        self.coin_store = coin_store
        self.block_store = block_store
        # Workers cannot share the dataclass directly, so ship a JSON-able copy.
        self.constants_json = recurse_jsonify(dataclasses.asdict(self.constants))
        self._shut_down = False
        await self._load_chain_from_store()
        self._seen_compact_proofs = set()
        self.hint_store = hint_store
        return self
    def shut_down(self):
        """Mark the blockchain as shut down and wait for the validation pool to terminate."""
        self._shut_down = True
        self.pool.shutdown(wait=True)
    async def _load_chain_from_store(self) -> None:
        """
        Initializes the state of the Blockchain class from the database.
        """
        height_to_hash, sub_epoch_summaries = await self.block_store.get_peak_height_dicts()
        self.__height_to_hash = height_to_hash
        self.__sub_epoch_summaries = sub_epoch_summaries
        self.__block_records = {}
        self.__heights_in_cache = {}
        # Warm the in-memory cache with the blocks closest to the peak.
        block_records, peak = await self.block_store.get_block_records_close_to_peak(self.constants.BLOCKS_CACHE_SIZE)
        for block in block_records.values():
            self.add_block_record(block)
        if len(block_records) == 0:
            # Empty database: no peak yet.
            assert peak is None
            self._peak_height = None
            return None
        assert peak is not None
        self._peak_height = self.block_record(peak).height
        # The main chain must be contiguous from genesis (height 0) up to the peak.
        assert len(self.__height_to_hash) == self._peak_height + 1
def get_peak(self) -> Optional[BlockRecord]:
"""
Return the peak of the blockchain
"""
if self._peak_height is None:
return None
return self.height_to_block_record(self._peak_height)
async def get_full_peak(self) -> Optional[FullBlock]:
if self._peak_height is None:
return None
""" Return list of FullBlocks that are peaks"""
block = await self.block_store.get_full_block(self.height_to_hash(self._peak_height))
assert block is not None
return block
    async def get_full_block(self, header_hash: bytes32) -> Optional[FullBlock]:
        """Fetch a full block from the store by header hash, or None if unknown."""
        return await self.block_store.get_full_block(header_hash)

    async def receive_block(
        self,
        block: FullBlock,
        pre_validation_result: Optional[PreValidationResult] = None,
        fork_point_with_peak: Optional[uint32] = None,
    ) -> Tuple[
        ReceiveBlockResult,
        Optional[Err],
        Optional[uint32],
        Tuple[List[CoinRecord], Dict[bytes, Dict[bytes32, CoinRecord]]],
    ]:
        """
        This method must be called under the blockchain lock
        Adds a new block into the blockchain, if it's valid and connected to the current
        blockchain, regardless of whether it is the child of a head, or another block.
        Returns a header if block is added to head. Returns an error if the block is
        invalid. Also returns the fork height, in the case of a new peak.

        Args:
            block: the FullBlock to add.
            pre_validation_result: if provided, header validation results computed in a
                batch are reused instead of validating inline.
            fork_point_with_peak: known fork height with the current peak, if any.

        Returns:
            (result, error code, fork height on NEW_PEAK, (changed coin records,
            hint -> {coin id -> coin record} changes)).
        """
        genesis: bool = block.height == 0
        if self.contains_block(block.header_hash):
            return ReceiveBlockResult.ALREADY_HAVE_BLOCK, None, None, ([], {})
        if not self.contains_block(block.prev_header_hash) and not genesis:
            return (ReceiveBlockResult.DISCONNECTED_BLOCK, Err.INVALID_PREV_BLOCK_HASH, None, ([], {}))
        if not genesis and (self.block_record(block.prev_header_hash).height + 1) != block.height:
            return ReceiveBlockResult.INVALID_BLOCK, Err.INVALID_HEIGHT, None, ([], {})
        npc_result: Optional[NPCResult] = None
        if pre_validation_result is None:
            # No batch pre-validation was done: run the (expensive) header validation inline.
            if block.height == 0:
                prev_b: Optional[BlockRecord] = None
            else:
                prev_b = self.block_record(block.prev_header_hash)
            sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
                self.constants, len(block.finished_sub_slots) > 0, prev_b, self
            )
            if block.is_transaction_block():
                if block.transactions_generator is not None:
                    try:
                        block_generator: Optional[BlockGenerator] = await self.get_block_generator(block)
                    except ValueError:
                        return ReceiveBlockResult.INVALID_BLOCK, Err.GENERATOR_REF_HAS_NO_GENERATOR, None, ([], {})
                    assert block_generator is not None and block.transactions_info is not None
                    npc_result = get_name_puzzle_conditions(
                        block_generator,
                        min(self.constants.MAX_BLOCK_COST_CLVM, block.transactions_info.cost),
                        cost_per_byte=self.constants.COST_PER_BYTE,
                        safe_mode=False,
                    )
                    removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
                else:
                    removals, tx_additions = [], []
                header_block = get_block_header(block, tx_additions, removals)
            else:
                npc_result = None
                header_block = get_block_header(block, [], [])
            required_iters, error = validate_finished_header_block(
                self.constants,
                self,
                header_block,
                False,
                difficulty,
                sub_slot_iters,
            )
            if error is not None:
                return ReceiveBlockResult.INVALID_BLOCK, error.code, None, ([], {})
        else:
            # Reuse the results computed during batch pre-validation.
            npc_result = pre_validation_result.npc_result
            required_iters = pre_validation_result.required_iters
            assert pre_validation_result.error is None
        assert required_iters is not None
        error_code, _ = await validate_block_body(
            self.constants,
            self,
            self.block_store,
            self.coin_store,
            self.get_peak(),
            block,
            block.height,
            npc_result,
            fork_point_with_peak,
            self.get_block_generator,
        )
        if error_code is not None:
            return ReceiveBlockResult.INVALID_BLOCK, error_code, None, ([], {})
        block_record = block_to_block_record(
            self.constants,
            self,
            required_iters,
            block,
            None,
        )
        # Always add the block to the database
        async with self.block_store.db_wrapper.lock:
            try:
                header_hash: bytes32 = block.header_hash
                # Perform the DB operations to update the state, and rollback if something goes wrong
                await self.block_store.db_wrapper.begin_transaction()
                await self.block_store.add_full_block(header_hash, block, block_record)
                fork_height, peak_height, records, (coin_record_change, hint_changes) = await self._reconsider_peak(
                    block_record, genesis, fork_point_with_peak, npc_result
                )
                await self.block_store.db_wrapper.commit_transaction()
                # Then update the memory cache. It is important that this task is not cancelled and does not throw
                self.add_block_record(block_record)
                for fetched_block_record in records:
                    self.__height_to_hash[fetched_block_record.height] = fetched_block_record.header_hash
                    if fetched_block_record.sub_epoch_summary_included is not None:
                        self.__sub_epoch_summaries[
                            fetched_block_record.height
                        ] = fetched_block_record.sub_epoch_summary_included
                if peak_height is not None:
                    self._peak_height = peak_height
            except BaseException:
                # Any failure (including cancellation): undo both DB transaction and block cache.
                self.block_store.rollback_cache_block(header_hash)
                await self.block_store.db_wrapper.rollback_transaction()
                raise
        if fork_height is not None:
            # new coin records added
            assert coin_record_change is not None
            return ReceiveBlockResult.NEW_PEAK, None, fork_height, (coin_record_change, hint_changes)
        else:
            return ReceiveBlockResult.ADDED_AS_ORPHAN, None, None, ([], {})
def get_hint_list(self, npc_result: NPCResult) -> List[Tuple[bytes32, bytes]]:
h_list = []
for npc in npc_result.npc_list:
for opcode, conditions in npc.conditions:
if opcode == ConditionOpcode.CREATE_COIN:
for condition in conditions:
if len(condition.vars) > 2 and condition.vars[2] != b"":
puzzle_hash, amount_bin = condition.vars[0], condition.vars[1]
amount = int_from_bytes(amount_bin)
coin_id = Coin(npc.coin_name, puzzle_hash, amount).name()
h_list.append((coin_id, condition.vars[2]))
return h_list
    async def _reconsider_peak(
        self,
        block_record: BlockRecord,
        genesis: bool,
        fork_point_with_peak: Optional[uint32],
        npc_result: Optional[NPCResult],
    ) -> Tuple[
        Optional[uint32],
        Optional[uint32],
        List[BlockRecord],
        Tuple[List[CoinRecord], Dict[bytes, Dict[bytes32, CoinRecord]]],
    ]:
        """
        When a new block is added, this is called, to check if the new block is the new peak of the chain.
        This also handles reorgs by reverting blocks which are not in the heaviest chain.
        It returns the height of the fork between the previous chain and the new chain, or returns
        None if there was no update to the heaviest chain.

        Must run inside the caller's DB transaction (see receive_block).
        """
        peak = self.get_peak()
        # NOTE(review): "lastest" is a long-standing typo for "latest"; kept as-is since the
        # name is local to this method.
        lastest_coin_state: Dict[bytes32, CoinRecord] = {}
        hint_coin_state: Dict[bytes32, Dict[bytes32, CoinRecord]] = {}
        if genesis:
            if peak is None:
                # First block ever: apply it directly and set it as the peak.
                block: Optional[FullBlock] = await self.block_store.get_full_block(block_record.header_hash)
                assert block is not None
                if npc_result is not None:
                    tx_removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
                else:
                    tx_removals, tx_additions = [], []
                if block.is_transaction_block():
                    assert block.foliage_transaction_block is not None
                    added = await self.coin_store.new_block(
                        block.height,
                        block.foliage_transaction_block.timestamp,
                        block.get_included_reward_coins(),
                        tx_additions,
                        tx_removals,
                    )
                else:
                    added, _ = [], []
                await self.block_store.set_peak(block_record.header_hash)
                return uint32(0), uint32(0), [block_record], (added, {})
            # A genesis block arriving when we already have a peak changes nothing.
            return None, None, [], ([], {})
        assert peak is not None
        if block_record.weight > peak.weight:
            # Find the fork. if the block is just being appended, it will return the peak
            # If no blocks in common, returns -1, and reverts all blocks
            if block_record.prev_hash == peak.header_hash:
                fork_height: int = peak.height
            elif fork_point_with_peak is not None:
                fork_height = fork_point_with_peak
            else:
                fork_height = find_fork_point_in_chain(self, block_record, peak)
            if block_record.prev_hash != peak.header_hash:
                # Not a simple extension of the peak: roll coin state back to the fork point.
                roll_changes: List[CoinRecord] = await self.coin_store.rollback_to_block(fork_height)
                for coin_record in roll_changes:
                    lastest_coin_state[coin_record.name] = coin_record
                # Rollback sub_epoch_summaries
                heights_to_delete = []
                for ses_included_height in self.__sub_epoch_summaries.keys():
                    if ses_included_height > fork_height:
                        heights_to_delete.append(ses_included_height)
                for height in heights_to_delete:
                    log.info(f"delete ses at height {height}")
                    del self.__sub_epoch_summaries[height]
            # Collect all blocks from fork point to new peak
            blocks_to_add: List[Tuple[FullBlock, BlockRecord]] = []
            curr = block_record.header_hash
            while fork_height < 0 or curr != self.height_to_hash(uint32(fork_height)):
                fetched_full_block: Optional[FullBlock] = await self.block_store.get_full_block(curr)
                fetched_block_record: Optional[BlockRecord] = await self.block_store.get_block_record(curr)
                assert fetched_full_block is not None
                assert fetched_block_record is not None
                blocks_to_add.append((fetched_full_block, fetched_block_record))
                if fetched_full_block.height == 0:
                    # Doing a full reorg, starting at height 0
                    break
                curr = fetched_block_record.prev_hash
            records_to_add = []
            # Apply collected blocks oldest-first so coin state is built in height order.
            for fetched_full_block, fetched_block_record in reversed(blocks_to_add):
                records_to_add.append(fetched_block_record)
                if fetched_full_block.is_transaction_block():
                    if fetched_block_record.header_hash == block_record.header_hash:
                        # Only the new peak block may reuse the caller-supplied npc_result.
                        tx_removals, tx_additions, npc_res = await self.get_tx_removals_and_additions(
                            fetched_full_block, npc_result
                        )
                    else:
                        tx_removals, tx_additions, npc_res = await self.get_tx_removals_and_additions(
                            fetched_full_block, None
                        )
                    assert fetched_full_block.foliage_transaction_block is not None
                    added_rec = await self.coin_store.new_block(
                        fetched_full_block.height,
                        fetched_full_block.foliage_transaction_block.timestamp,
                        fetched_full_block.get_included_reward_coins(),
                        tx_additions,
                        tx_removals,
                    )
                    removed_rec: List[Optional[CoinRecord]] = [
                        await self.coin_store.get_coin_record(name) for name in tx_removals
                    ]
                    # Set additions first, then removals in order to handle ephemeral coin state
                    # Add in height order is also required
                    record: Optional[CoinRecord]
                    for record in added_rec:
                        assert record
                        lastest_coin_state[record.name] = record
                    for record in removed_rec:
                        assert record
                        lastest_coin_state[record.name] = record
                    if npc_res is not None:
                        hint_list: List[Tuple[bytes32, bytes]] = self.get_hint_list(npc_res)
                        await self.hint_store.add_hints(hint_list)
                        # There can be multiple coins for the same hint
                        for coin_id, hint in hint_list:
                            key = hint
                            if key not in hint_coin_state:
                                hint_coin_state[key] = {}
                            hint_coin_state[key][coin_id] = lastest_coin_state[coin_id]
            # Changes the peak to be the new peak
            await self.block_store.set_peak(block_record.header_hash)
            return (
                uint32(max(fork_height, 0)),
                block_record.height,
                records_to_add,
                (list(lastest_coin_state.values()), hint_coin_state),
            )
        # This is not a heavier block than the heaviest we have seen, so we don't change the coin set
        return None, None, [], ([], {})
async def get_tx_removals_and_additions(
self, block: FullBlock, npc_result: Optional[NPCResult] = None
) -> Tuple[List[bytes32], List[Coin], Optional[NPCResult]]:
if block.is_transaction_block():
if block.transactions_generator is not None:
if npc_result is None:
block_generator: Optional[BlockGenerator] = await self.get_block_generator(block)
assert block_generator is not None
npc_result = get_name_puzzle_conditions(
block_generator,
self.constants.MAX_BLOCK_COST_CLVM,
cost_per_byte=self.constants.COST_PER_BYTE,
safe_mode=False,
)
tx_removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
return tx_removals, tx_additions, npc_result
else:
return [], [], None
else:
return [], [], None
    def get_next_difficulty(self, header_hash: bytes32, new_slot: bool) -> uint64:
        """Return the difficulty for the block following header_hash."""
        assert self.contains_block(header_hash)
        curr = self.block_record(header_hash)
        if curr.height <= 2:
            # Not enough history for an adjustment yet: use the starting constant.
            return self.constants.DIFFICULTY_STARTING
        return get_next_sub_slot_iters_and_difficulty(self.constants, new_slot, curr, self)[1]

    def get_next_slot_iters(self, header_hash: bytes32, new_slot: bool) -> uint64:
        """Return the sub-slot iterations parameter for the block following header_hash."""
        assert self.contains_block(header_hash)
        curr = self.block_record(header_hash)
        if curr.height <= 2:
            # Not enough history for an adjustment yet: use the starting constant.
            return self.constants.SUB_SLOT_ITERS_STARTING
        return get_next_sub_slot_iters_and_difficulty(self.constants, new_slot, curr, self)[0]
    async def get_sp_and_ip_sub_slots(
        self, header_hash: bytes32
    ) -> Optional[Tuple[Optional[EndOfSubSlotBundle], Optional[EndOfSubSlotBundle]]]:
        """
        Return the (signage-point sub slot, infusion-point sub slot) for the block with
        `header_hash`, or None if the block is unknown. Either tuple element may be None
        when the corresponding sub slot does not exist (e.g. near genesis).
        """
        block: Optional[FullBlock] = await self.block_store.get_full_block(header_hash)
        if block is None:
            return None
        curr_br: BlockRecord = self.block_record(block.header_hash)
        is_overflow = curr_br.overflow
        curr: Optional[FullBlock] = block
        assert curr is not None
        # Walk back to the first block in this sub slot (or to genesis).
        while True:
            if curr_br.first_in_sub_slot:
                curr = await self.block_store.get_full_block(curr_br.header_hash)
                assert curr is not None
                break
            if curr_br.height == 0:
                break
            curr_br = self.block_record(curr_br.prev_hash)
        if len(curr.finished_sub_slots) == 0:
            # This means we got to genesis and still no sub-slots
            return None, None
        ip_sub_slot = curr.finished_sub_slots[-1]
        if not is_overflow:
            # Pos sub-slot is the same as infusion sub slot
            return None, ip_sub_slot
        if len(curr.finished_sub_slots) > 1:
            # Have both sub-slots
            return curr.finished_sub_slots[-2], ip_sub_slot
        # Overflow block with a single finished sub slot: the SP sub slot ended in an
        # earlier block, so walk back one more sub slot.
        prev_curr: Optional[FullBlock] = await self.block_store.get_full_block(curr.prev_header_hash)
        if prev_curr is None:
            assert curr.height == 0
            prev_curr = curr
            prev_curr_br = self.block_record(curr.header_hash)
        else:
            prev_curr_br = self.block_record(curr.prev_header_hash)
        assert prev_curr_br is not None
        while prev_curr_br.height > 0:
            if prev_curr_br.first_in_sub_slot:
                prev_curr = await self.block_store.get_full_block(prev_curr_br.header_hash)
                assert prev_curr is not None
                break
            prev_curr_br = self.block_record(prev_curr_br.prev_hash)
        if len(prev_curr.finished_sub_slots) == 0:
            return None, ip_sub_slot
        return prev_curr.finished_sub_slots[-1], ip_sub_slot
    def get_recent_reward_challenges(self) -> List[Tuple[bytes32, uint128]]:
        """
        Return recent reward-chain challenges as (challenge hash, total_iters) pairs in
        ascending total_iters order, walking back from the peak.
        """
        peak = self.get_peak()
        if peak is None:
            return []
        recent_rc: List[Tuple[bytes32, uint128]] = []
        curr: Optional[BlockRecord] = peak
        while curr is not None and len(recent_rc) < 2 * self.constants.MAX_SUB_SLOT_BLOCKS:
            if curr != peak:
                # The peak's own challenge is excluded.
                recent_rc.append((curr.reward_infusion_new_challenge, curr.total_iters))
            if curr.first_in_sub_slot:
                assert curr.finished_reward_slot_hashes is not None
                sub_slot_total_iters = curr.ip_sub_slot_total_iters(self.constants)
                # Start from the most recent
                for rc in reversed(curr.finished_reward_slot_hashes):
                    if sub_slot_total_iters < curr.sub_slot_iters:
                        # Would underflow below zero iterations; stop.
                        break
                    recent_rc.append((rc, sub_slot_total_iters))
                    sub_slot_total_iters = uint128(sub_slot_total_iters - curr.sub_slot_iters)
            curr = self.try_block_record(curr.prev_hash)
        # Collected newest-first; callers get them oldest-first.
        return list(reversed(recent_rc))
    async def validate_unfinished_block(
        self, block: UnfinishedBlock, npc_result: Optional[NPCResult], skip_overflow_ss_validation=True
    ) -> PreValidationResult:
        """
        Validate an unfinished block's header and body. Returns a PreValidationResult
        carrying either an error code, or the required_iters and the body-validation cost.
        """
        if (
            not self.contains_block(block.prev_header_hash)
            and not block.prev_header_hash == self.constants.GENESIS_CHALLENGE
        ):
            return PreValidationResult(uint16(Err.INVALID_PREV_BLOCK_HASH.value), None, None)
        # Re-wrap the unfinished block as a header block for header validation.
        unfinished_header_block = UnfinishedHeaderBlock(
            block.finished_sub_slots,
            block.reward_chain_block,
            block.challenge_chain_sp_proof,
            block.reward_chain_sp_proof,
            block.foliage,
            block.foliage_transaction_block,
            b"",
        )
        prev_b = self.try_block_record(unfinished_header_block.prev_header_hash)
        sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
            self.constants, len(unfinished_header_block.finished_sub_slots) > 0, prev_b, self
        )
        required_iters, error = validate_unfinished_header_block(
            self.constants,
            self,
            unfinished_header_block,
            False,
            difficulty,
            sub_slot_iters,
            skip_overflow_ss_validation,
        )
        if error is not None:
            return PreValidationResult(uint16(error.code.value), None, None)
        # Genesis's parent is treated as a virtual block at height -1.
        prev_height = (
            -1
            if block.prev_header_hash == self.constants.GENESIS_CHALLENGE
            else self.block_record(block.prev_header_hash).height
        )
        error_code, cost_result = await validate_block_body(
            self.constants,
            self,
            self.block_store,
            self.coin_store,
            self.get_peak(),
            block,
            uint32(prev_height + 1),
            npc_result,
            None,
            self.get_block_generator,
            False,
        )
        if error_code is not None:
            return PreValidationResult(uint16(error_code.value), None, None)
        return PreValidationResult(None, required_iters, cost_result)
    async def pre_validate_blocks_multiprocessing(
        self,
        blocks: List[FullBlock],
        npc_results: Dict[uint32, NPCResult],
        batch_size: int = 4,
        wp_summaries: Optional[List[SubEpochSummary]] = None,
    ) -> Optional[List[PreValidationResult]]:
        """Pre-validate a batch of blocks in parallel (thin wrapper over the module-level helper)."""
        return await pre_validate_blocks_multiprocessing(
            self.constants,
            self.constants_json,
            self,
            blocks,
            self.pool,
            True,
            npc_results,
            self.get_block_generator,
            batch_size,
            wp_summaries,
        )
async def run_generator(self, unfinished_block: bytes, generator: BlockGenerator) -> NPCResult:
task = asyncio.get_running_loop().run_in_executor(
self.pool,
_run_generator,
self.constants_json,
unfinished_block,
bytes(generator),
)
error, npc_result_bytes = await task
if error is not None:
raise ConsensusError(error)
if npc_result_bytes is None:
raise ConsensusError(Err.UNKNOWN)
return NPCResult.from_bytes(npc_result_bytes)
    def contains_block(self, header_hash: bytes32) -> bool:
        """
        True if we have already added this block to the chain. This may return false for orphan blocks
        that we have added but no longer keep in memory.
        """
        return header_hash in self.__block_records

    def block_record(self, header_hash: bytes32) -> BlockRecord:
        """Return the cached BlockRecord for header_hash (KeyError if not cached)."""
        return self.__block_records[header_hash]

    def height_to_block_record(self, height: uint32) -> BlockRecord:
        """Return the main-chain BlockRecord at the given height."""
        header_hash = self.height_to_hash(height)
        return self.block_record(header_hash)

    def get_ses_heights(self) -> List[uint32]:
        """Return the heights at which sub-epoch summaries were included, ascending."""
        return sorted(self.__sub_epoch_summaries.keys())

    def get_ses(self, height: uint32) -> SubEpochSummary:
        """Return the sub-epoch summary included at the given height."""
        return self.__sub_epoch_summaries[height]

    def height_to_hash(self, height: uint32) -> Optional[bytes32]:
        # NOTE(review): despite the Optional annotation, this raises KeyError for an
        # unknown height rather than returning None -- confirm before relying on None.
        return self.__height_to_hash[height]

    def contains_height(self, height: uint32) -> bool:
        """True if the main chain contains a block at this height."""
        return height in self.__height_to_hash

    def get_peak_height(self) -> Optional[uint32]:
        """Return the height of the peak, or None if the chain is empty."""
        return self._peak_height
async def warmup(self, fork_point: uint32):
"""
Loads blocks into the cache. The blocks loaded include all blocks from
fork point - BLOCKS_CACHE_SIZE up to and including the fork_point.
Args:
fork_point: the last block height to load in the cache
"""
if self._peak_height is None:
return None
block_records = await self.block_store.get_block_records_in_range(
max(fork_point - self.constants.BLOCKS_CACHE_SIZE, uint32(0)), fork_point
)
for block_record in block_records.values():
self.add_block_record(block_record)
    def clean_block_record(self, height: int):
        """
        Clears all block records in the cache which have block_record < height.
        Args:
            height: Minimum height that we need to keep in the cache
        """
        if height < 0:
            return None
        blocks_to_remove = self.__heights_in_cache.get(uint32(height), None)
        # Walk downwards from `height`, evicting each cached height until we hit a gap
        # (a height with no cached blocks) or height 0.
        while blocks_to_remove is not None and height >= 0:
            for header_hash in blocks_to_remove:
                del self.__block_records[header_hash]  # remove from blocks
            del self.__heights_in_cache[uint32(height)]  # remove height from heights in cache
            if height == 0:
                break
            height = height - 1
            blocks_to_remove = self.__heights_in_cache.get(uint32(height), None)
def clean_block_records(self):
"""
Cleans the cache so that we only maintain relevant blocks. This removes
block records that have height < peak - BLOCKS_CACHE_SIZE.
These blocks are necessary for calculating future difficulty adjustments.
"""
if len(self.__block_records) < self.constants.BLOCKS_CACHE_SIZE:
return None
assert self._peak_height is not None
if self._peak_height - self.constants.BLOCKS_CACHE_SIZE < 0:
return None
self.clean_block_record(self._peak_height - self.constants.BLOCKS_CACHE_SIZE)
    async def get_block_records_in_range(self, start: int, stop: int) -> Dict[bytes32, BlockRecord]:
        """Delegate to the block store to fetch block records in the given height range."""
        return await self.block_store.get_block_records_in_range(start, stop)
async def get_header_blocks_in_range(
self, start: int, stop: int, tx_filter: bool = True
) -> Dict[bytes32, HeaderBlock]:
hashes = []
for height in range(start, stop + 1):
if self.contains_height(uint32(height)):
header_hash: bytes32 = self.height_to_hash(uint32(height))
hashes.append(header_hash)
blocks: List[FullBlock] = []
for hash in hashes.copy():
block = self.block_store.block_cache.get(hash)
if block is not None:
blocks.append(block)
hashes.remove(hash)
blocks_on_disk: List[FullBlock] = await self.block_store.get_blocks_by_hash(hashes)
blocks.extend(blocks_on_disk)
header_blocks: Dict[bytes32, HeaderBlock] = {}
for block in blocks:
if self.height_to_hash(block.height) != block.header_hash:
raise ValueError(f"Block at {block.header_hash} is no longer in the blockchain (it's in a fork)")
if tx_filter is False:
header = get_block_header(block, [], [])
else:
tx_additions: List[CoinRecord] = [
c for c in (await self.coin_store.get_coins_added_at_height(block.height)) if not c.coinbase
]
removed: List[CoinRecord] = await self.coin_store.get_coins_removed_at_height(block.height)
header = get_block_header(
block, [record.coin for record in tx_additions], [record.coin.name() for record in removed]
)
header_blocks[header.header_hash] = header
return header_blocks
async def get_header_block_by_height(
self, height: int, header_hash: bytes32, tx_filter: bool = True
) -> Optional[HeaderBlock]:
header_dict: Dict[bytes32, HeaderBlock] = await self.get_header_blocks_in_range(height, height, tx_filter)
if len(header_dict) == 0:
return None
if header_hash not in header_dict:
return None
return header_dict[header_hash]
async def get_block_records_at(self, heights: List[uint32], batch_size=900) -> List[BlockRecord]:
"""
gets block records by height (only blocks that are part of the chain)
"""
records: List[BlockRecord] = []
hashes = []
assert batch_size < 999 # sqlite in python 3.7 has a limit on 999 variables in queries
for height in heights:
hashes.append(self.height_to_hash(height))
if len(hashes) > batch_size:
res = await self.block_store.get_block_records_by_hash(hashes)
records.extend(res)
hashes = []
if len(hashes) > 0:
res = await self.block_store.get_block_records_by_hash(hashes)
records.extend(res)
return records
async def get_block_record_from_db(self, header_hash: bytes32) -> Optional[BlockRecord]:
if header_hash in self.__block_records:
return self.__block_records[header_hash]
return await self.block_store.get_block_record(header_hash)
def remove_block_record(self, header_hash: bytes32):
sbr = self.block_record(header_hash)
del self.__block_records[header_hash]
self.__heights_in_cache[sbr.height].remove(header_hash)
def add_block_record(self, block_record: BlockRecord):
"""
Adds a block record to the cache.
"""
self.__block_records[block_record.header_hash] = block_record
if block_record.height not in self.__heights_in_cache.keys():
self.__heights_in_cache[block_record.height] = set()
self.__heights_in_cache[block_record.height].add(block_record.header_hash)
    async def persist_sub_epoch_challenge_segments(
        self, ses_block_hash: bytes32, segments: List[SubEpochChallengeSegment]
    ):
        """Persist weight-proof challenge segments for the given sub-epoch summary block."""
        return await self.block_store.persist_sub_epoch_challenge_segments(ses_block_hash, segments)
async def get_sub_epoch_challenge_segments(
self,
ses_block_hash: bytes32,
) -> Optional[List[SubEpochChallengeSegment]]:
segments: Optional[List[SubEpochChallengeSegment]] = await self.block_store.get_sub_epoch_challenge_segments(
ses_block_hash
)
if segments is None:
return None
return segments
# Returns 'True' if the info is already in the set, otherwise returns 'False' and stores it.
def seen_compact_proofs(self, vdf_info: VDFInfo, height: uint32) -> bool:
pot_tuple = (vdf_info, height)
if pot_tuple in self._seen_compact_proofs:
return True
# Periodically cleanup to keep size small. TODO: make this smarter, like FIFO.
if len(self._seen_compact_proofs) > 10000:
self._seen_compact_proofs.clear()
self._seen_compact_proofs.add(pot_tuple)
return False
    async def get_block_generator(
        self, block: Union[FullBlock, UnfinishedBlock], additional_blocks=None
    ) -> Optional[BlockGenerator]:
        """
        Assemble the BlockGenerator for `block`, resolving its generator ref list into
        the referenced blocks' generators. `additional_blocks` maps header hashes to
        blocks not yet in the store (e.g. during a reorg). Raises ValueError if a
        referenced block has no generator.
        """
        if additional_blocks is None:
            additional_blocks = {}
        ref_list = block.transactions_generator_ref_list
        if block.transactions_generator is None:
            # A block without a generator must not reference any generators.
            assert len(ref_list) == 0
            return None
        if len(ref_list) == 0:
            return BlockGenerator(block.transactions_generator, [])
        result: List[GeneratorArg] = []
        previous_block_hash = block.prev_header_hash
        if (
            self.try_block_record(previous_block_hash)
            and self.height_to_hash(self.block_record(previous_block_hash).height) == previous_block_hash
        ):
            # We are not in a reorg, no need to look up alternate header hashes (we can get them from height_to_hash)
            for ref_height in block.transactions_generator_ref_list:
                header_hash = self.height_to_hash(ref_height)
                ref_block = await self.get_full_block(header_hash)
                assert ref_block is not None
                if ref_block.transactions_generator is None:
                    raise ValueError(Err.GENERATOR_REF_HAS_NO_GENERATOR)
                result.append(GeneratorArg(ref_block.height, ref_block.transactions_generator))
        else:
            # First tries to find the blocks in additional_blocks
            reorg_chain: Dict[uint32, FullBlock] = {}
            curr: Union[FullBlock, UnfinishedBlock] = block
            additional_height_dict = {}
            while curr.prev_header_hash in additional_blocks:
                prev: FullBlock = additional_blocks[curr.prev_header_hash]
                additional_height_dict[prev.height] = prev
                if isinstance(curr, FullBlock):
                    assert curr.height == prev.height + 1
                reorg_chain[prev.height] = prev
                curr = prev
            peak: Optional[BlockRecord] = self.get_peak()
            if self.contains_block(curr.prev_header_hash) and peak is not None:
                # Then we look up blocks up to fork point one at a time, backtracking
                previous_block_hash = curr.prev_header_hash
                prev_block_record = await self.block_store.get_block_record(previous_block_hash)
                prev_block = await self.block_store.get_full_block(previous_block_hash)
                assert prev_block is not None
                assert prev_block_record is not None
                fork = find_fork_point_in_chain(self, peak, prev_block_record)
                curr_2: Optional[FullBlock] = prev_block
                assert curr_2 is not None and isinstance(curr_2, FullBlock)
                reorg_chain[curr_2.height] = curr_2
                while curr_2.height > fork and curr_2.height > 0:
                    curr_2 = await self.block_store.get_full_block(curr_2.prev_header_hash)
                    assert curr_2 is not None
                    reorg_chain[curr_2.height] = curr_2
            # Resolve each ref, preferring the reorg chain, then additional blocks, then the main chain.
            for ref_height in block.transactions_generator_ref_list:
                if ref_height in reorg_chain:
                    ref_block = reorg_chain[ref_height]
                    assert ref_block is not None
                    if ref_block.transactions_generator is None:
                        raise ValueError(Err.GENERATOR_REF_HAS_NO_GENERATOR)
                    result.append(GeneratorArg(ref_block.height, ref_block.transactions_generator))
                else:
                    if ref_height in additional_height_dict:
                        ref_block = additional_height_dict[ref_height]
                    else:
                        header_hash = self.height_to_hash(ref_height)
                        ref_block = await self.get_full_block(header_hash)
                    assert ref_block is not None
                    if ref_block.transactions_generator is None:
                        raise ValueError(Err.GENERATOR_REF_HAS_NO_GENERATOR)
                    result.append(GeneratorArg(ref_block.height, ref_block.transactions_generator))
        assert len(result) == len(ref_list)
        return BlockGenerator(block.transactions_generator, result)
from salvia.util.ints import uint32, uint64
# 1 Salvia coin = 1,000,000,000,000 = 1 trillion seed.
_seed_per_salvia = 1000000000000
# presumably 32 blocks per signage interval, 6 intervals/hour-equivalent -- confirm
# against consensus constants; the product is the approximate blocks per year.
_blocks_per_year = 1681920  # 32 * 6 * 24 * 365
def _halving_reward(height: uint32, fraction: float) -> uint64:
    """
    Shared halving schedule: `fraction` of the total block reward at `height`.

    The multiplier starts at 21,000,000 for the genesis block (pre-farm) and halves
    every 3 * _blocks_per_year blocks, settling at 0.125 after 12 years' worth.
    The arithmetic intentionally mirrors the original expressions (same float
    evaluation order) so results are bit-for-bit identical.
    """
    if height == 0:
        # Genesis pre-farm.
        return uint64(int(fraction * 21000000 * _seed_per_salvia))
    if height < 3 * _blocks_per_year:
        return uint64(int(fraction * 2 * _seed_per_salvia))
    if height < 6 * _blocks_per_year:
        return uint64(int(fraction * 1 * _seed_per_salvia))
    if height < 9 * _blocks_per_year:
        return uint64(int(fraction * 0.5 * _seed_per_salvia))
    if height < 12 * _blocks_per_year:
        return uint64(int(fraction * 0.25 * _seed_per_salvia))
    return uint64(int(fraction * 0.125 * _seed_per_salvia))


def calculate_pool_reward(height: uint32) -> uint64:
    """
    Returns the pool reward at a certain block height. The pool earns 7/8 of the reward in each block. If the farmer
    is solo farming, they act as the pool, and therefore earn the entire block reward.
    These halving events will not be hit at the exact times
    (3 years, etc), due to fluctuations in difficulty. They will likely come early, if the network space and VDF
    rates increase continuously.
    """
    return _halving_reward(height, 7 / 8)


def calculate_base_farmer_reward(height: uint32) -> uint64:
    """
    Returns the base farmer reward at a certain block height.
    The base fee reward is 1/8 of total block reward
    These halving events will not be hit at the exact times
    (3 years, etc), due to fluctuations in difficulty. They will likely come early, if the network space and VDF
    rates increase continuously.
    """
    return _halving_reward(height, 1 / 8)
from dataclasses import dataclass
from typing import List, Optional
from salvia.consensus.constants import ConsensusConstants
from salvia.consensus.pot_iterations import calculate_ip_iters, calculate_sp_iters
from salvia.types.blockchain_format.classgroup import ClassgroupElement
from salvia.types.blockchain_format.coin import Coin
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from salvia.util.ints import uint8, uint32, uint64, uint128
from salvia.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class BlockRecord(Streamable):
    """
    This class is not included or hashed into the blockchain, but it is kept in memory as a more
    efficient way to maintain data about the blockchain. This allows us to validate future blocks,
    difficulty adjustments, etc, without saving the whole header block in memory.

    NOTE(review): field declaration order defines the streamable serialization
    layout — do not reorder or retype fields.
    """
    header_hash: bytes32
    prev_hash: bytes32  # Header hash of the previous block
    height: uint32
    weight: uint128  # Total cumulative difficulty of all ancestor blocks since genesis
    total_iters: uint128  # Total number of VDF iterations since genesis, including this block
    signage_point_index: uint8
    challenge_vdf_output: ClassgroupElement  # This is the intermediary VDF output at ip_iters in challenge chain
    infused_challenge_vdf_output: Optional[
        ClassgroupElement
    ]  # This is the intermediary VDF output at ip_iters in infused cc, iff deficit <= 3
    reward_infusion_new_challenge: bytes32  # The reward chain infusion output, input to next VDF
    challenge_block_info_hash: bytes32  # Hash of challenge chain data, used to validate end of slots in the future
    sub_slot_iters: uint64  # Current network sub_slot_iters parameter
    pool_puzzle_hash: bytes32  # Need to keep track of these because Coins are created in a future block
    farmer_puzzle_hash: bytes32
    required_iters: uint64  # The number of iters required for this proof of space
    deficit: uint8  # A deficit of 16 is an overflow block after an infusion. Deficit of 15 is a challenge block
    overflow: bool
    prev_transaction_block_height: uint32
    # Transaction block (present iff is_transaction_block)
    timestamp: Optional[uint64]
    prev_transaction_block_hash: Optional[bytes32]  # Header hash of the previous transaction block
    fees: Optional[uint64]
    reward_claims_incorporated: Optional[List[Coin]]
    # Slot (present iff this is the first SB in sub slot)
    finished_challenge_slot_hashes: Optional[List[bytes32]]
    finished_infused_challenge_slot_hashes: Optional[List[bytes32]]
    finished_reward_slot_hashes: Optional[List[bytes32]]
    # Sub-epoch (present iff this is the first SB after sub-epoch)
    sub_epoch_summary_included: Optional[SubEpochSummary]

    @property
    def is_transaction_block(self) -> bool:
        """True iff this is a transaction block (``timestamp`` is only set on those)."""
        return self.timestamp is not None

    @property
    def first_in_sub_slot(self) -> bool:
        """True iff this block is the first in its sub slot (slot hashes only set then)."""
        return self.finished_challenge_slot_hashes is not None

    def is_challenge_block(self, constants: ConsensusConstants) -> bool:
        """True iff this block is the challenge block of its slot (the one that
        lowers the deficit to MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1)."""
        return self.deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1

    def sp_sub_slot_total_iters(self, constants: ConsensusConstants) -> uint128:
        """Total iterations at the start of the sub slot containing this block's
        signage point. For overflow blocks the signage point falls in the
        previous sub slot, hence the extra ``sub_slot_iters`` subtraction."""
        if self.overflow:
            return uint128(self.total_iters - self.ip_iters(constants) - self.sub_slot_iters)
        else:
            return uint128(self.total_iters - self.ip_iters(constants))

    def ip_sub_slot_total_iters(self, constants: ConsensusConstants) -> uint128:
        """Total iterations at the start of the sub slot containing this block's
        infusion point."""
        return uint128(self.total_iters - self.ip_iters(constants))

    def sp_iters(self, constants: ConsensusConstants) -> uint64:
        """Iterations from the start of the sub slot to this block's signage point."""
        return calculate_sp_iters(constants, self.sub_slot_iters, self.signage_point_index)

    def ip_iters(self, constants: ConsensusConstants) -> uint64:
        """Iterations from the start of the sub slot to this block's infusion point."""
        return calculate_ip_iters(
            constants,
            self.sub_slot_iters,
            self.signage_point_index,
            self.required_iters,
        )

    def sp_total_iters(self, constants: ConsensusConstants):
        """Total iterations since genesis at this block's signage point.

        NOTE(review): no return annotation in the original; ``uint128 + uint64``
        arithmetic presumably yields a plain ``int`` — confirm before annotating.
        """
        return self.sp_sub_slot_total_iters(constants) + self.sp_iters(constants)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.