id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
17,300
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/scheduling/_scheduling_object.py
SchedulingObject.get_property
def get_property(self, property_key: str) -> str: """Get a scheduling object property.""" self._check_object_exists() return DB.get_hash_value(self.key, property_key)
python
def get_property(self, property_key: str) -> str:
    """Fetch a single property value of the scheduling object.

    Args:
        property_key (str): Name of the property to read.

    Returns:
        str, the stored property value.
    """
    # Fail fast (KeyError) if the object is missing from the database.
    self._check_object_exists()
    value = DB.get_hash_value(self.key, property_key)
    return value
[ "def", "get_property", "(", "self", ",", "property_key", ":", "str", ")", "->", "str", ":", "self", ".", "_check_object_exists", "(", ")", "return", "DB", ".", "get_hash_value", "(", "self", ".", "key", ",", "property_key", ")" ]
Get a scheduling object property.
[ "Get", "a", "scheduling", "object", "property", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/scheduling/_scheduling_object.py#L60-L63
17,301
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/scheduling/_scheduling_object.py
SchedulingObject.set_status
def set_status(self, value): """Set the status of the scheduling object.""" self._check_object_exists() DB.set_hash_value(self.key, 'status', value) self.publish('status_changed', event_data=dict(status=value))
python
def set_status(self, value):
    """Store a new status for the scheduling object and announce it.

    Args:
        value: New status value to record.
    """
    # Fail fast (KeyError) if the object is missing from the database.
    self._check_object_exists()
    DB.set_hash_value(self.key, 'status', value)
    # Notify subscribers that the status field changed.
    self.publish('status_changed', event_data={'status': value})
[ "def", "set_status", "(", "self", ",", "value", ")", ":", "self", ".", "_check_object_exists", "(", ")", "DB", ".", "set_hash_value", "(", "self", ".", "key", ",", "'status'", ",", "value", ")", "self", ".", "publish", "(", "'status_changed'", ",", "even...
Set the status of the scheduling object.
[ "Set", "the", "status", "of", "the", "scheduling", "object", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/scheduling/_scheduling_object.py#L81-L85
17,302
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/scheduling/_scheduling_object.py
SchedulingObject.publish
def publish(self, event_type: str, event_data: dict = None): """Publish an event associated with the scheduling object. Note: Ideally publish should not be used directly but by other methods which perform actions on the object. Args: event_type (str): Type of event. event_data (dict, optional): Event data. """ import inspect import os.path _stack = inspect.stack() _origin = os.path.basename(_stack[3][1]) + '::' + \ _stack[3][3]+'::L{}'.format(_stack[3][2]) publish(event_type=event_type, event_data=event_data, object_type=self._type, object_id=self._id, object_key=self._key, origin=_origin)
python
def publish(self, event_type: str, event_data: dict = None):
    """Publish an event associated with the scheduling object.

    Note:
        Ideally publish should not be used directly but by other
        methods which perform actions on the object.

    Args:
        event_type (str): Type of event.
        event_data (dict, optional): Event data.
    """
    # Local imports, presumably to keep the cost off the module import
    # path — TODO confirm there is no import-cycle reason instead.
    import inspect
    import os.path
    # Build an origin tag of the form '<file>::<function>::L<line>'
    # from the caller three frames up the stack.
    # NOTE(review): the fixed index 3 assumes a specific call depth
    # (wrapper methods calling publish) — confirm it holds for every
    # caller; a direct call would attribute the wrong frame.
    _stack = inspect.stack()
    _origin = os.path.basename(_stack[3][1]) + '::' + \
        _stack[3][3]+'::L{}'.format(_stack[3][2])
    # Delegate to the module-level publish() with this object's identity.
    publish(event_type=event_type, event_data=event_data,
            object_type=self._type, object_id=self._id,
            object_key=self._key, origin=_origin)
[ "def", "publish", "(", "self", ",", "event_type", ":", "str", ",", "event_data", ":", "dict", "=", "None", ")", ":", "import", "inspect", "import", "os", ".", "path", "_stack", "=", "inspect", ".", "stack", "(", ")", "_origin", "=", "os", ".", "path"...
Publish an event associated with the scheduling object. Note: Ideally publish should not be used directly but by other methods which perform actions on the object. Args: event_type (str): Type of event. event_data (dict, optional): Event data.
[ "Publish", "an", "event", "associated", "with", "the", "scheduling", "object", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/scheduling/_scheduling_object.py#L117-L140
17,303
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/scheduling/_scheduling_object.py
SchedulingObject.get_events
def get_events(self) -> List[Event]: """Get events associated with the scheduling object. Returns: list of Event objects """ LOG.debug('Getting events for %s', self.key) return get_events(self.key)
python
def get_events(self) -> List[Event]:
    """Return the events recorded for this scheduling object.

    Returns:
        list of Event objects.
    """
    LOG.debug('Getting events for %s', self.key)
    events = get_events(self.key)
    return events
[ "def", "get_events", "(", "self", ")", "->", "List", "[", "Event", "]", ":", "LOG", ".", "debug", "(", "'Getting events for %s'", ",", "self", ".", "key", ")", "return", "get_events", "(", "self", ".", "key", ")" ]
Get events associated with the scheduling object. Returns: list of Event objects
[ "Get", "events", "associated", "with", "the", "scheduling", "object", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/scheduling/_scheduling_object.py#L142-L150
17,304
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/scheduling/_scheduling_object.py
SchedulingObject._check_object_exists
def _check_object_exists(self): """Raise a KeyError if the scheduling object doesnt exist. Raise: KeyError, if the object doesnt exist in the database. """ if not DB.get_keys(self.key): raise KeyError("Object with key '{}' not exist".format(self.key))
python
def _check_object_exists(self):
    """Raise a KeyError if the scheduling object does not exist.

    Raises:
        KeyError: If no database keys match ``self.key``.
    """
    if not DB.get_keys(self.key):
        # Fixed grammar of the error message ("not exist" -> "does not
        # exist").
        raise KeyError("Object with key '{}' does not exist"
                       .format(self.key))
[ "def", "_check_object_exists", "(", "self", ")", ":", "if", "not", "DB", ".", "get_keys", "(", "self", ".", "key", ")", ":", "raise", "KeyError", "(", "\"Object with key '{}' not exist\"", ".", "format", "(", "self", ".", "key", ")", ")" ]
Raise a KeyError if the scheduling object doesnt exist. Raise: KeyError, if the object doesnt exist in the database.
[ "Raise", "a", "KeyError", "if", "the", "scheduling", "object", "doesnt", "exist", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/scheduling/_scheduling_object.py#L160-L168
17,305
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/states/service_state.py
ServiceState.get_service_state_object_id
def get_service_state_object_id(subsystem: str, name: str, version: str) -> str: """Return service state data object key. Args: subsystem (str): Subsystem the service belongs to name (str): Name of the Service version (str): Version of the Service Returns: str, Key used to store the service state data object """ return '{}:{}:{}'.format(subsystem, name, version)
python
def get_service_state_object_id(subsystem: str, name: str, version: str) -> str:
    """Build the database key for a service state data object.

    Args:
        subsystem (str): Subsystem the service belongs to.
        name (str): Name of the service.
        version (str): Version of the service.

    Returns:
        str, key used to store the service state data object.
    """
    # Key format is '<subsystem>:<name>:<version>'.
    return ':'.join((subsystem, name, version))
[ "def", "get_service_state_object_id", "(", "subsystem", ":", "str", ",", "name", ":", "str", ",", "version", ":", "str", ")", "->", "str", ":", "return", "'{}:{}:{}'", ".", "format", "(", "subsystem", ",", "name", ",", "version", ")" ]
Return service state data object key. Args: subsystem (str): Subsystem the service belongs to name (str): Name of the Service version (str): Version of the Service Returns: str, Key used to store the service state data object
[ "Return", "service", "state", "data", "object", "key", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/states/service_state.py#L59-L72
17,306
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient.create_services
def create_services(self, compose_str: str) -> list: """Create new docker services. Args: compose_str (string): Docker compose 'file' string Return: service_names, list """ # Raise an exception if we are not a manager if not self._manager: raise RuntimeError('Services can only be run on ' 'swarm manager nodes') # Initialise empty list services_ids = [] try: service_config = yaml.load(compose_str) # Deepcopy the service config service_list = copy.deepcopy(service_config) # Removing version and service from the dict service_config.pop('version') service_config.pop('services') for service_name in service_list['services']: service_exist = self._client.services.list( filters={'name': service_name}) if not service_exist: service_config['name'] = service_name service_spec = self._parse_services( service_config, service_name, service_list) created_service = self._client.services.create( **service_spec) service_id = created_service.short_id LOG.debug('Service created: %s', service_id) services_ids.append(service_id) else: LOG.debug('Services already exists') except yaml.YAMLError as exc: print(exc) # Returning list of services created return services_ids
python
def create_services(self, compose_str: str) -> list:
    """Create new docker services from a compose 'file' string.

    Args:
        compose_str (string): Docker compose 'file' string.

    Returns:
        list, short ids of the services created.

    Raises:
        RuntimeError: If this node is not a swarm manager.
    """
    # Raise an exception if we are not a manager
    if not self._manager:
        raise RuntimeError('Services can only be run on '
                           'swarm manager nodes')
    services_ids = []
    try:
        # safe_load instead of the deprecated bare yaml.load: it cannot
        # construct arbitrary Python objects from the compose string.
        service_config = yaml.safe_load(compose_str)
        # Keep a full copy; the original dict is stripped down to the
        # keys shared by every service spec.
        service_list = copy.deepcopy(service_config)
        # Removing version and service from the dict
        service_config.pop('version')
        service_config.pop('services')
        for service_name in service_list['services']:
            # Only create services that do not already exist.
            service_exist = self._client.services.list(
                filters={'name': service_name})
            if not service_exist:
                service_config['name'] = service_name
                service_spec = self._parse_services(
                    service_config, service_name, service_list)
                created_service = self._client.services.create(
                    **service_spec)
                service_id = created_service.short_id
                LOG.debug('Service created: %s', service_id)
                services_ids.append(service_id)
            else:
                LOG.debug('Services already exists')
    except yaml.YAMLError as exc:
        # Best-effort: report the parse error and return what was made.
        print(exc)
    # Returning list of services created
    return services_ids
[ "def", "create_services", "(", "self", ",", "compose_str", ":", "str", ")", "->", "list", ":", "# Raise an exception if we are not a manager", "if", "not", "self", ".", "_manager", ":", "raise", "RuntimeError", "(", "'Services can only be run on '", "'swarm manager node...
Create new docker services. Args: compose_str (string): Docker compose 'file' string Return: service_names, list
[ "Create", "new", "docker", "services", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L88-L134
17,307
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient.create_volume
def create_volume(self, volume_name: str, driver_spec: str = None): """Create new docker volumes. Only the manager nodes can create a volume Args: volume_name (string): Name for the new docker volume driver_spec (string): Driver for the docker volume """ # Default values if driver_spec: driver = driver_spec else: driver = 'local' # Raise an exception if we are not a manager if not self._manager: raise RuntimeError('Services can only be deleted ' 'on swarm manager nodes') self._client.volumes.create(name=volume_name, driver=driver)
python
def create_volume(self, volume_name: str, driver_spec: str = None):
    """Create a new docker volume.

    Only the manager nodes can create a volume.

    Args:
        volume_name (string): Name for the new docker volume.
        driver_spec (string): Driver for the docker volume
            (defaults to 'local').

    Raises:
        RuntimeError: If this node is not a swarm manager.
    """
    # Fall back to the default local driver when none is given.
    driver = driver_spec if driver_spec else 'local'
    # Raise an exception if we are not a manager
    if not self._manager:
        # Fixed copy-pasted message: this guards volume *creation*.
        raise RuntimeError('Volumes can only be created '
                           'on swarm manager nodes')
    self._client.volumes.create(name=volume_name, driver=driver)
[ "def", "create_volume", "(", "self", ",", "volume_name", ":", "str", ",", "driver_spec", ":", "str", "=", "None", ")", ":", "# Default values", "if", "driver_spec", ":", "driver", "=", "driver_spec", "else", ":", "driver", "=", "'local'", "# Raise an exception...
Create new docker volumes. Only the manager nodes can create a volume Args: volume_name (string): Name for the new docker volume driver_spec (string): Driver for the docker volume
[ "Create", "new", "docker", "volumes", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L136-L156
17,308
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient.delete_all_volumes
def delete_all_volumes(self): """Remove all the volumes. Only the manager nodes can delete a volume """ # Raise an exception if we are not a manager if not self._manager: raise RuntimeError('Volumes can only be deleted ' 'on swarm manager nodes') volume_list = self.get_volume_list() for volumes in volume_list: # Remove all the services self._api_client.remove_volume(volumes, force=True)
python
def delete_all_volumes(self):
    """Remove every volume known to the swarm.

    Only the manager nodes can delete a volume.
    """
    # Refuse to run on non-manager nodes.
    if not self._manager:
        raise RuntimeError('Volumes can only be deleted '
                           'on swarm manager nodes')
    # Force-remove each volume by name.
    for volume_name in self.get_volume_list():
        self._api_client.remove_volume(volume_name, force=True)
[ "def", "delete_all_volumes", "(", "self", ")", ":", "# Raise an exception if we are not a manager", "if", "not", "self", ".", "_manager", ":", "raise", "RuntimeError", "(", "'Volumes can only be deleted '", "'on swarm manager nodes'", ")", "volume_list", "=", "self", ".",...
Remove all the volumes. Only the manager nodes can delete a volume
[ "Remove", "all", "the", "volumes", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L209-L222
17,309
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient.get_service_list
def get_service_list(self) -> list: """Get a list of docker services. Only the manager nodes can retrieve all the services Returns: list, all the ids of the services in swarm """ # Initialising empty list services = [] # Raise an exception if we are not a manager if not self._manager: raise RuntimeError('Only the Swarm manager node can retrieve' ' all the services.') service_list = self._client.services.list() for s_list in service_list: services.append(s_list.short_id) return services
python
def get_service_list(self) -> list:
    """Return the ids of all docker services in the swarm.

    Only the manager nodes can retrieve all the services.

    Returns:
        list, short ids of all swarm services.
    """
    # Refuse to run on non-manager nodes.
    if not self._manager:
        raise RuntimeError('Only the Swarm manager node can retrieve'
                           ' all the services.')
    return [service.short_id for service in self._client.services.list()]
[ "def", "get_service_list", "(", "self", ")", "->", "list", ":", "# Initialising empty list", "services", "=", "[", "]", "# Raise an exception if we are not a manager", "if", "not", "self", ".", "_manager", ":", "raise", "RuntimeError", "(", "'Only the Swarm manager node...
Get a list of docker services. Only the manager nodes can retrieve all the services Returns: list, all the ids of the services in swarm
[ "Get", "a", "list", "of", "docker", "services", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L228-L248
17,310
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient.get_service_name
def get_service_name(self, service_id: str) -> str: """Get the name of the docker service. Only the manager nodes can retrieve service name Args: service_id (string): List of service ID Returns: string, name of the docker service """ # Raise an exception if we are not a manager if not self._manager: raise RuntimeError('Only the Swarm manager node can retrieve all' ' the services details.') service = self._client.services.get(service_id) return service.name
python
def get_service_name(self, service_id: str) -> str:
    """Return the name of a docker service.

    Only the manager nodes can retrieve a service name.

    Args:
        service_id (string): Service id.

    Returns:
        string, name of the docker service.
    """
    # Refuse to run on non-manager nodes.
    if not self._manager:
        raise RuntimeError('Only the Swarm manager node can retrieve all'
                           ' the services details.')
    return self._client.services.get(service_id).name
[ "def", "get_service_name", "(", "self", ",", "service_id", ":", "str", ")", "->", "str", ":", "# Raise an exception if we are not a manager", "if", "not", "self", ".", "_manager", ":", "raise", "RuntimeError", "(", "'Only the Swarm manager node can retrieve all'", "' th...
Get the name of the docker service. Only the manager nodes can retrieve service name Args: service_id (string): List of service ID Returns: string, name of the docker service
[ "Get", "the", "name", "of", "the", "docker", "service", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L250-L268
17,311
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient.get_service_details
def get_service_details(self, service_id: str) -> dict: """Get details of a service. Only the manager nodes can retrieve service details Args: service_id (string): List of service id Returns: dict, details of the service """ # Raise an exception if we are not a manager if not self._manager: raise RuntimeError('Only the Swarm manager node can retrieve all' ' the services details.') service = self._client.services.get(service_id) return service.attrs
python
def get_service_details(self, service_id: str) -> dict:
    """Return the raw attributes of a docker service.

    Only the manager nodes can retrieve service details.

    Args:
        service_id (string): Service id.

    Returns:
        dict, details of the service.
    """
    # Refuse to run on non-manager nodes.
    if not self._manager:
        raise RuntimeError('Only the Swarm manager node can retrieve all'
                           ' the services details.')
    return self._client.services.get(service_id).attrs
[ "def", "get_service_details", "(", "self", ",", "service_id", ":", "str", ")", "->", "dict", ":", "# Raise an exception if we are not a manager", "if", "not", "self", ".", "_manager", ":", "raise", "RuntimeError", "(", "'Only the Swarm manager node can retrieve all'", "...
Get details of a service. Only the manager nodes can retrieve service details Args: service_id (string): List of service id Returns: dict, details of the service
[ "Get", "details", "of", "a", "service", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L270-L288
17,312
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient.get_service_state
def get_service_state(self, service_id: str) -> str: """Get the state of the service. Only the manager nodes can retrieve service state Args: service_id (str): Service id Returns: str, state of the service """ # Get service service = self._client.services.get(service_id) # Get the state of the service for service_task in service.tasks(): service_state = service_task['DesiredState'] return service_state
python
def get_service_state(self, service_id: str) -> str:
    """Return the desired state of a service.

    Args:
        service_id (str): Service id.

    Returns:
        str, 'DesiredState' of the service's last listed task, or None
        when the service currently has no tasks.
    """
    # Get service
    service = self._client.services.get(service_id)
    # Initialise to None: the original raised UnboundLocalError for a
    # service with no tasks; now it returns None instead.
    service_state = None
    # The loop keeps the state of the last task in the list.
    for service_task in service.tasks():
        service_state = service_task['DesiredState']
    return service_state
[ "def", "get_service_state", "(", "self", ",", "service_id", ":", "str", ")", "->", "str", ":", "# Get service", "service", "=", "self", ".", "_client", ".", "services", ".", "get", "(", "service_id", ")", "# Get the state of the service", "for", "service_task", ...
Get the state of the service. Only the manager nodes can retrieve service state Args: service_id (str): Service id Returns: str, state of the service
[ "Get", "the", "state", "of", "the", "service", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L290-L308
17,313
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient.get_node_list
def get_node_list(self) -> list: """Get a list of nodes. Only the manager nodes can retrieve all the nodes Returns: list, all the ids of the nodes in swarm """ # Initialising empty list nodes = [] # Raise an exception if we are not a manager if not self._manager: raise RuntimeError('Only the Swarm manager node ' 'can retrieve all the nodes.') node_list = self._client.nodes.list() for n_list in node_list: nodes.append(n_list.id) return nodes
python
def get_node_list(self) -> list:
    """Return the ids of all nodes in the swarm.

    Only the manager nodes can retrieve all the nodes.

    Returns:
        list, ids of all swarm nodes.
    """
    # Refuse to run on non-manager nodes.
    if not self._manager:
        raise RuntimeError('Only the Swarm manager node '
                           'can retrieve all the nodes.')
    return [node.id for node in self._client.nodes.list()]
[ "def", "get_node_list", "(", "self", ")", "->", "list", ":", "# Initialising empty list", "nodes", "=", "[", "]", "# Raise an exception if we are not a manager", "if", "not", "self", ".", "_manager", ":", "raise", "RuntimeError", "(", "'Only the Swarm manager node '", ...
Get a list of nodes. Only the manager nodes can retrieve all the nodes Returns: list, all the ids of the nodes in swarm
[ "Get", "a", "list", "of", "nodes", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L310-L330
17,314
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient.get_node_details
def get_node_details(self, node_id: list) -> dict: """Get details of a node. Only the manager nodes can retrieve details of a node Args: node_id (list): List of node ID Returns: dict, details of the node """ # Raise an exception if we are not a manager if not self._manager: raise RuntimeError('Only the Swarm manager node can ' 'retrieve node details.') node = self._client.nodes.get(node_id) return node.attrs
python
def get_node_details(self, node_id: str) -> dict:
    """Return the raw attributes of a swarm node.

    Only the manager nodes can retrieve details of a node.

    Args:
        node_id (str): Id (or name) of a single node. The original
            annotation said ``list``, but a single id is passed through
            to ``nodes.get``; annotation corrected to ``str``.

    Returns:
        dict, details of the node.
    """
    # Raise an exception if we are not a manager
    if not self._manager:
        raise RuntimeError('Only the Swarm manager node can '
                           'retrieve node details.')
    node = self._client.nodes.get(node_id)
    return node.attrs
[ "def", "get_node_details", "(", "self", ",", "node_id", ":", "list", ")", "->", "dict", ":", "# Raise an exception if we are not a manager", "if", "not", "self", ".", "_manager", ":", "raise", "RuntimeError", "(", "'Only the Swarm manager node can '", "'retrieve node de...
Get details of a node. Only the manager nodes can retrieve details of a node Args: node_id (list): List of node ID Returns: dict, details of the node
[ "Get", "details", "of", "a", "node", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L332-L350
17,315
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient.get_container_list
def get_container_list(self) -> list: """Get list of containers. Returns: list, all the ids of containers """ # Initialising empty list containers = [] containers_list = self._client.containers.list() for c_list in containers_list: containers.append(c_list.short_id) return containers
python
def get_container_list(self) -> list:
    """Return the ids of all running containers.

    Returns:
        list, short ids of the containers.
    """
    return [container.short_id
            for container in self._client.containers.list()]
[ "def", "get_container_list", "(", "self", ")", "->", "list", ":", "# Initialising empty list", "containers", "=", "[", "]", "containers_list", "=", "self", ".", "_client", ".", "containers", ".", "list", "(", ")", "for", "c_list", "in", "containers_list", ":",...
Get list of containers. Returns: list, all the ids of containers
[ "Get", "list", "of", "containers", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L352-L365
17,316
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient.get_container_details
def get_container_details(self, container_id_or_name: str) -> dict: """Get details of a container. Args: container_id_or_name (string): docker container id or name Returns: dict, details of the container """ container = self._client.containers.get(container_id_or_name) return container.attrs
python
def get_container_details(self, container_id_or_name: str) -> dict:
    """Return the raw attributes of a container.

    Args:
        container_id_or_name (string): Docker container id or name.

    Returns:
        dict, details of the container.
    """
    return self._client.containers.get(container_id_or_name).attrs
[ "def", "get_container_details", "(", "self", ",", "container_id_or_name", ":", "str", ")", "->", "dict", ":", "container", "=", "self", ".", "_client", ".", "containers", ".", "get", "(", "container_id_or_name", ")", "return", "container", ".", "attrs" ]
Get details of a container. Args: container_id_or_name (string): docker container id or name Returns: dict, details of the container
[ "Get", "details", "of", "a", "container", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L367-L378
17,317
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient.get_volume_list
def get_volume_list(self) -> list: """Get a list of docker volumes. Only the manager nodes can retrieve all the volumes Returns: list, all the names of the volumes in swarm """ # Initialising empty list volumes = [] # Raise an exception if we are not a manager if not self._manager: raise RuntimeError('Only the Swarm manager node can retrieve' ' all the services.') volume_list = self._client.volumes.list() for v_list in volume_list: volumes.append(v_list.name) return volumes
python
def get_volume_list(self) -> list:
    """Return the names of all docker volumes.

    Only the manager nodes can retrieve all the volumes.

    Returns:
        list, names of all volumes in the swarm.
    """
    # Refuse to run on non-manager nodes.
    if not self._manager:
        raise RuntimeError('Only the Swarm manager node can retrieve'
                           ' all the services.')
    return [volume.name for volume in self._client.volumes.list()]
[ "def", "get_volume_list", "(", "self", ")", "->", "list", ":", "# Initialising empty list", "volumes", "=", "[", "]", "# Raise an exception if we are not a manager", "if", "not", "self", ".", "_manager", ":", "raise", "RuntimeError", "(", "'Only the Swarm manager node c...
Get a list of docker volumes. Only the manager nodes can retrieve all the volumes Returns: list, all the names of the volumes in swarm
[ "Get", "a", "list", "of", "docker", "volumes", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L380-L400
17,318
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient.get_volume_details
def get_volume_details(self, volume_name: str) -> dict: """Get details of the volume. Args: volume_name (str): Name of the volume Returns: dict, details of the volume """ if volume_name not in self.volumes: raise RuntimeError('No such volume found: ', volume_name) volume = self._client.volumes.get(volume_name) return volume.attrs
python
def get_volume_details(self, volume_name: str) -> dict:
    """Return the raw attributes of a volume.

    Args:
        volume_name (str): Name of the volume.

    Returns:
        dict, details of the volume.

    Raises:
        RuntimeError: If no volume with that name exists.
    """
    if volume_name not in self.volumes:
        # Format the name into the message instead of passing it as a
        # second positional arg (which rendered as a tuple).
        raise RuntimeError('No such volume found: {}'.format(volume_name))
    volume = self._client.volumes.get(volume_name)
    return volume.attrs
[ "def", "get_volume_details", "(", "self", ",", "volume_name", ":", "str", ")", "->", "dict", ":", "if", "volume_name", "not", "in", "self", ".", "volumes", ":", "raise", "RuntimeError", "(", "'No such volume found: '", ",", "volume_name", ")", "volume", "=", ...
Get details of the volume. Args: volume_name (str): Name of the volume Returns: dict, details of the volume
[ "Get", "details", "of", "the", "volume", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L402-L416
17,319
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient.get_actual_replica
def get_actual_replica(self, service_id: str) -> str: """Get the actual replica level of a service. Args: service_id (str): docker swarm service id Returns: str, replicated level of the service """ # Raise an exception if we are not a manager if not self._manager: raise RuntimeError('Only the Swarm manager node can retrieve ' 'replication level of the service') service_details = self.get_service_details(service_id) actual_replica = service_details["Spec"]["Mode"][ "Replicated"]["Replicas"] return actual_replica
python
def get_actual_replica(self, service_id: str) -> str:
    """Return the configured replica level of a service.

    Args:
        service_id (str): Docker swarm service id.

    Returns:
        str, replicated level of the service.
    """
    # Refuse to run on non-manager nodes.
    if not self._manager:
        raise RuntimeError('Only the Swarm manager node can retrieve '
                           'replication level of the service')
    details = self.get_service_details(service_id)
    # Walk the spec down to the declared replica count.
    return details["Spec"]["Mode"]["Replicated"]["Replicas"]
[ "def", "get_actual_replica", "(", "self", ",", "service_id", ":", "str", ")", "->", "str", ":", "# Raise an exception if we are not a manager", "if", "not", "self", ".", "_manager", ":", "raise", "RuntimeError", "(", "'Only the Swarm manager node can retrieve '", "'repl...
Get the actual replica level of a service. Args: service_id (str): docker swarm service id Returns: str, replicated level of the service
[ "Get", "the", "actual", "replica", "level", "of", "a", "service", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L418-L436
17,320
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient.get_replicas
def get_replicas(self, service_id: str) -> str: """Get the replication level of a service. Args: service_id (str): docker swarm service id Returns: str, replication level of the service """ # Initialising empty list replicas = [] # Raise an exception if we are not a manager if not self._manager: raise RuntimeError('Only the Swarm manager node can retrieve ' 'replication level of the service') service_tasks = self._client.services.get(service_id).tasks() for task in service_tasks: if task['Status']['State'] == "running": replicas.append(task) return len(replicas)
python
def get_replicas(self, service_id: str) -> str: # Initialising empty list replicas = [] # Raise an exception if we are not a manager if not self._manager: raise RuntimeError('Only the Swarm manager node can retrieve ' 'replication level of the service') service_tasks = self._client.services.get(service_id).tasks() for task in service_tasks: if task['Status']['State'] == "running": replicas.append(task) return len(replicas)
[ "def", "get_replicas", "(", "self", ",", "service_id", ":", "str", ")", "->", "str", ":", "# Initialising empty list", "replicas", "=", "[", "]", "# Raise an exception if we are not a manager", "if", "not", "self", ".", "_manager", ":", "raise", "RuntimeError", "(...
Get the replication level of a service. Args: service_id (str): docker swarm service id Returns: str, replication level of the service
[ "Get", "the", "replication", "level", "of", "a", "service", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L438-L460
17,321
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient.update_labels
def update_labels(self, node_name: str, labels: dict): """Update label of a node. Args: node_name (string): Name of the node. labels (dict): Label to add to the node """ # Raise an exception if we are not a manager if not self._manager: raise RuntimeError('Only the Swarm manager node can update ' 'node details.') # Node specification node_spec = {'Availability': 'active', 'Name': node_name, 'Role': 'manager', 'Labels': labels} node = self._client.nodes.get(node_name) node.update(node_spec)
python
def update_labels(self, node_name: str, labels: dict): # Raise an exception if we are not a manager if not self._manager: raise RuntimeError('Only the Swarm manager node can update ' 'node details.') # Node specification node_spec = {'Availability': 'active', 'Name': node_name, 'Role': 'manager', 'Labels': labels} node = self._client.nodes.get(node_name) node.update(node_spec)
[ "def", "update_labels", "(", "self", ",", "node_name", ":", "str", ",", "labels", ":", "dict", ")", ":", "# Raise an exception if we are not a manager", "if", "not", "self", ".", "_manager", ":", "raise", "RuntimeError", "(", "'Only the Swarm manager node can update '...
Update label of a node. Args: node_name (string): Name of the node. labels (dict): Label to add to the node
[ "Update", "label", "of", "a", "node", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L466-L484
17,322
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient._parse_services
def _parse_services(self, service_config: dict, service_name: str, service_list: dict) -> dict: """Parse the docker compose file. Args: service_config (dict): Service configurations from the compose file service_name (string): Name of the services service_list (dict): Service configuration list Returns: dict, service specifications extracted from the compose file """ for key, value in service_list['services'][service_name].items(): service_config[key] = value if 'command' in key: key = "args" service_config['args'] = value service_config.pop('command') if 'ports' in key: endpoint_spec = self._parse_ports(value) service_config['endpoint_spec'] = endpoint_spec service_config.pop('ports') if 'volumes' in key: volume_spec = self._parse_volumes(value) service_config['mounts'] = volume_spec service_config.pop('volumes') if 'deploy' in key: self._parse_deploy(value, service_config) service_config.pop('deploy') if 'networks' in key: network_spec = self._parse_networks(service_list) service_config['networks'] = network_spec if 'logging' in key: self._parse_logging(value, service_config) service_config.pop('logging') if 'environment' in key: service_config['env'] = value service_config.pop('environment') # LOG.info('Service Config: %s', service_config) return service_config
python
def _parse_services(self, service_config: dict, service_name: str, service_list: dict) -> dict: for key, value in service_list['services'][service_name].items(): service_config[key] = value if 'command' in key: key = "args" service_config['args'] = value service_config.pop('command') if 'ports' in key: endpoint_spec = self._parse_ports(value) service_config['endpoint_spec'] = endpoint_spec service_config.pop('ports') if 'volumes' in key: volume_spec = self._parse_volumes(value) service_config['mounts'] = volume_spec service_config.pop('volumes') if 'deploy' in key: self._parse_deploy(value, service_config) service_config.pop('deploy') if 'networks' in key: network_spec = self._parse_networks(service_list) service_config['networks'] = network_spec if 'logging' in key: self._parse_logging(value, service_config) service_config.pop('logging') if 'environment' in key: service_config['env'] = value service_config.pop('environment') # LOG.info('Service Config: %s', service_config) return service_config
[ "def", "_parse_services", "(", "self", ",", "service_config", ":", "dict", ",", "service_name", ":", "str", ",", "service_list", ":", "dict", ")", "->", "dict", ":", "for", "key", ",", "value", "in", "service_list", "[", "'services'", "]", "[", "service_na...
Parse the docker compose file. Args: service_config (dict): Service configurations from the compose file service_name (string): Name of the services service_list (dict): Service configuration list Returns: dict, service specifications extracted from the compose file
[ "Parse", "the", "docker", "compose", "file", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L490-L531
17,323
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient._parse_deploy
def _parse_deploy(self, deploy_values: dict, service_config: dict): """Parse deploy key. Args: deploy_values (dict): deploy configuration values service_config (dict): Service configuration """ # Initialising empty dictionary mode = {} for d_value in deploy_values: if 'restart_policy' in d_value: restart_spec = docker.types.RestartPolicy( **deploy_values[d_value]) service_config['restart_policy'] = restart_spec if 'placement' in d_value: for constraints_key, constraints_value in \ deploy_values[d_value].items(): service_config[constraints_key] = constraints_value if 'mode' in d_value: mode[d_value] = deploy_values[d_value] if 'replicas' in d_value: mode[d_value] = deploy_values[d_value] if 'resources' in d_value: resource_spec = self._parse_resources( deploy_values, d_value) service_config['resources'] = resource_spec # Setting the types mode_spec = docker.types.ServiceMode(**mode) service_config['mode'] = mode_spec
python
def _parse_deploy(self, deploy_values: dict, service_config: dict): # Initialising empty dictionary mode = {} for d_value in deploy_values: if 'restart_policy' in d_value: restart_spec = docker.types.RestartPolicy( **deploy_values[d_value]) service_config['restart_policy'] = restart_spec if 'placement' in d_value: for constraints_key, constraints_value in \ deploy_values[d_value].items(): service_config[constraints_key] = constraints_value if 'mode' in d_value: mode[d_value] = deploy_values[d_value] if 'replicas' in d_value: mode[d_value] = deploy_values[d_value] if 'resources' in d_value: resource_spec = self._parse_resources( deploy_values, d_value) service_config['resources'] = resource_spec # Setting the types mode_spec = docker.types.ServiceMode(**mode) service_config['mode'] = mode_spec
[ "def", "_parse_deploy", "(", "self", ",", "deploy_values", ":", "dict", ",", "service_config", ":", "dict", ")", ":", "# Initialising empty dictionary", "mode", "=", "{", "}", "for", "d_value", "in", "deploy_values", ":", "if", "'restart_policy'", "in", "d_value...
Parse deploy key. Args: deploy_values (dict): deploy configuration values service_config (dict): Service configuration
[ "Parse", "deploy", "key", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L533-L563
17,324
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient._parse_ports
def _parse_ports(port_values: dict) -> dict: """Parse ports key. Args: port_values (dict): ports configuration values Returns: dict, Ports specification which contains exposed ports """ # Initialising empty dictionary endpoints = {} for port_element in port_values: target_port = port_element.split(':') for port in target_port: endpoints[int(port)] = int(port) # Setting the types endpoint_spec = docker.types.EndpointSpec(ports=endpoints) return endpoint_spec
python
def _parse_ports(port_values: dict) -> dict: # Initialising empty dictionary endpoints = {} for port_element in port_values: target_port = port_element.split(':') for port in target_port: endpoints[int(port)] = int(port) # Setting the types endpoint_spec = docker.types.EndpointSpec(ports=endpoints) return endpoint_spec
[ "def", "_parse_ports", "(", "port_values", ":", "dict", ")", "->", "dict", ":", "# Initialising empty dictionary", "endpoints", "=", "{", "}", "for", "port_element", "in", "port_values", ":", "target_port", "=", "port_element", ".", "split", "(", "':'", ")", "...
Parse ports key. Args: port_values (dict): ports configuration values Returns: dict, Ports specification which contains exposed ports
[ "Parse", "ports", "key", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L570-L590
17,325
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient._parse_volumes
def _parse_volumes(volume_values: dict) -> str: """Parse volumes key. Args: volume_values (dict): volume configuration values Returns: string, volume specification with mount source and container path """ for v_values in volume_values: for v_key, v_value in v_values.items(): if v_key == 'source': if v_value == '.': source = os.path.dirname( os.path.abspath(__file__)) else: source = v_value if v_key == 'target': target = v_value volume_spec = [source + ':' + target] return volume_spec
python
def _parse_volumes(volume_values: dict) -> str: for v_values in volume_values: for v_key, v_value in v_values.items(): if v_key == 'source': if v_value == '.': source = os.path.dirname( os.path.abspath(__file__)) else: source = v_value if v_key == 'target': target = v_value volume_spec = [source + ':' + target] return volume_spec
[ "def", "_parse_volumes", "(", "volume_values", ":", "dict", ")", "->", "str", ":", "for", "v_values", "in", "volume_values", ":", "for", "v_key", ",", "v_value", "in", "v_values", ".", "items", "(", ")", ":", "if", "v_key", "==", "'source'", ":", "if", ...
Parse volumes key. Args: volume_values (dict): volume configuration values Returns: string, volume specification with mount source and container path
[ "Parse", "volumes", "key", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L593-L614
17,326
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient._parse_resources
def _parse_resources(resource_values: dict, resource_name: str) -> dict: """Parse resources key. Args: resource_values (dict): resource configurations values resource_name (string): Resource name Returns: dict, resources specification """ # Initialising empty dictionary resources = {} for r_values in resource_values[resource_name]: if 'limits' in r_values: for r_key, r_value in \ resource_values[resource_name][r_values].items(): if 'cpu' in r_key: cpu_value = float(r_value) * 10 ** 9 cpu_key = r_key[:3] + '_limit' resources[cpu_key] = int(cpu_value) if 'mem' in r_key: mem_value = re.sub('M', '', r_value) mem_key = r_key[:3] + '_limit' resources[mem_key] = int(mem_value) * 1048576 resources_spec = docker.types.Resources(**resources) return resources_spec
python
def _parse_resources(resource_values: dict, resource_name: str) -> dict: # Initialising empty dictionary resources = {} for r_values in resource_values[resource_name]: if 'limits' in r_values: for r_key, r_value in \ resource_values[resource_name][r_values].items(): if 'cpu' in r_key: cpu_value = float(r_value) * 10 ** 9 cpu_key = r_key[:3] + '_limit' resources[cpu_key] = int(cpu_value) if 'mem' in r_key: mem_value = re.sub('M', '', r_value) mem_key = r_key[:3] + '_limit' resources[mem_key] = int(mem_value) * 1048576 resources_spec = docker.types.Resources(**resources) return resources_spec
[ "def", "_parse_resources", "(", "resource_values", ":", "dict", ",", "resource_name", ":", "str", ")", "->", "dict", ":", "# Initialising empty dictionary", "resources", "=", "{", "}", "for", "r_values", "in", "resource_values", "[", "resource_name", "]", ":", "...
Parse resources key. Args: resource_values (dict): resource configurations values resource_name (string): Resource name Returns: dict, resources specification
[ "Parse", "resources", "key", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L617-L645
17,327
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient._parse_networks
def _parse_networks(service_list: dict) -> list: """Parse network key. Args: service_list (dict): Service configurations Returns: list, List of networks """ # Initialising empty list networks = [] for n_values in service_list['networks'].values(): for n_key, n_value in n_values.items(): if 'name' in n_key: networks.append(n_value) return networks
python
def _parse_networks(service_list: dict) -> list: # Initialising empty list networks = [] for n_values in service_list['networks'].values(): for n_key, n_value in n_values.items(): if 'name' in n_key: networks.append(n_value) return networks
[ "def", "_parse_networks", "(", "service_list", ":", "dict", ")", "->", "list", ":", "# Initialising empty list", "networks", "=", "[", "]", "for", "n_values", "in", "service_list", "[", "'networks'", "]", ".", "values", "(", ")", ":", "for", "n_key", ",", ...
Parse network key. Args: service_list (dict): Service configurations Returns: list, List of networks
[ "Parse", "network", "key", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L648-L665
17,328
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
DockerSwarmClient._parse_logging
def _parse_logging(log_values: dict, service_config: dict): """Parse log key. Args: log_values (dict): logging configuration values service_config (dict): Service specification """ for log_key, log_value in log_values.items(): if 'driver' in log_key: service_config['log_driver'] = log_value if 'options' in log_key: service_config['log_driver_options'] = log_value
python
def _parse_logging(log_values: dict, service_config: dict): for log_key, log_value in log_values.items(): if 'driver' in log_key: service_config['log_driver'] = log_value if 'options' in log_key: service_config['log_driver_options'] = log_value
[ "def", "_parse_logging", "(", "log_values", ":", "dict", ",", "service_config", ":", "dict", ")", ":", "for", "log_key", ",", "log_value", "in", "log_values", ".", "items", "(", ")", ":", "if", "'driver'", "in", "log_key", ":", "service_config", "[", "'log...
Parse log key. Args: log_values (dict): logging configuration values service_config (dict): Service specification
[ "Parse", "log", "key", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L668-L679
17,329
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/processing_controller/scheduler/scheduler.py
ProcessingBlockScheduler._init_queue
def _init_queue(): """Initialise the Processing Block queue from the database. This method should populate the queue from the current state of the Configuration Database. This needs to be based on the current set of Processing Blocks in the database and consider events on these processing blocks. """ LOG.info('Initialising Processing Block queue.') queue = ProcessingBlockQueue() active_pb_ids = ProcessingBlockList().active LOG.info('Initialising PC PB queue: %s', active_pb_ids) for pb_id in active_pb_ids: pb = ProcessingBlock(pb_id) queue.put(pb.id, pb.priority, pb.type) return queue
python
def _init_queue(): LOG.info('Initialising Processing Block queue.') queue = ProcessingBlockQueue() active_pb_ids = ProcessingBlockList().active LOG.info('Initialising PC PB queue: %s', active_pb_ids) for pb_id in active_pb_ids: pb = ProcessingBlock(pb_id) queue.put(pb.id, pb.priority, pb.type) return queue
[ "def", "_init_queue", "(", ")", ":", "LOG", ".", "info", "(", "'Initialising Processing Block queue.'", ")", "queue", "=", "ProcessingBlockQueue", "(", ")", "active_pb_ids", "=", "ProcessingBlockList", "(", ")", ".", "active", "LOG", ".", "info", "(", "'Initiali...
Initialise the Processing Block queue from the database. This method should populate the queue from the current state of the Configuration Database. This needs to be based on the current set of Processing Blocks in the database and consider events on these processing blocks.
[ "Initialise", "the", "Processing", "Block", "queue", "from", "the", "database", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/processing_controller/scheduler/scheduler.py#L44-L60
17,330
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/processing_controller/scheduler/scheduler.py
ProcessingBlockScheduler._monitor_events
def _monitor_events(self): """Watch for Processing Block events.""" LOG.info("Starting to monitor PB events") check_counter = 0 while True: if check_counter == 50: check_counter = 0 LOG.debug('Checking for PB events...') published_events = self._pb_events.get_published_events() for event in published_events: if event.type == 'status_changed': LOG.info('PB status changed event: %s', event.data['status']) if event.data['status'] == 'created': LOG.info('Acknowledged PB created event (%s) for %s, ' '[timestamp: %s]', event.id, event.object_id, event.timestamp) pb = ProcessingBlock(event.object_id) self._queue.put(event.object_id, pb.priority, pb.type) if event.data['status'] == 'completed': LOG.info('Acknowledged PB completed event (%s) for %s,' ' [timestamp: %s]', event.id, event.object_id, event.timestamp) self._num_pbcs -= 1 if self._num_pbcs < 0: self._num_pbcs = 0 time.sleep(0.1) check_counter += 1
python
def _monitor_events(self): LOG.info("Starting to monitor PB events") check_counter = 0 while True: if check_counter == 50: check_counter = 0 LOG.debug('Checking for PB events...') published_events = self._pb_events.get_published_events() for event in published_events: if event.type == 'status_changed': LOG.info('PB status changed event: %s', event.data['status']) if event.data['status'] == 'created': LOG.info('Acknowledged PB created event (%s) for %s, ' '[timestamp: %s]', event.id, event.object_id, event.timestamp) pb = ProcessingBlock(event.object_id) self._queue.put(event.object_id, pb.priority, pb.type) if event.data['status'] == 'completed': LOG.info('Acknowledged PB completed event (%s) for %s,' ' [timestamp: %s]', event.id, event.object_id, event.timestamp) self._num_pbcs -= 1 if self._num_pbcs < 0: self._num_pbcs = 0 time.sleep(0.1) check_counter += 1
[ "def", "_monitor_events", "(", "self", ")", ":", "LOG", ".", "info", "(", "\"Starting to monitor PB events\"", ")", "check_counter", "=", "0", "while", "True", ":", "if", "check_counter", "==", "50", ":", "check_counter", "=", "0", "LOG", ".", "debug", "(", ...
Watch for Processing Block events.
[ "Watch", "for", "Processing", "Block", "events", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/processing_controller/scheduler/scheduler.py#L66-L97
17,331
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/processing_controller/scheduler/scheduler.py
ProcessingBlockScheduler._schedule_processing_blocks
def _schedule_processing_blocks(self): """Schedule Processing Blocks for execution.""" LOG.info('Starting to Schedule Processing Blocks.') while True: time.sleep(0.5) if not self._queue: continue if self._num_pbcs >= self._max_pbcs: LOG.warning('Resource limit reached!') continue _inspect = Inspect(app=APP) if self._queue and _inspect.active() is not None: next_pb = self._queue[-1] LOG.info('Considering %s for execution...', next_pb[2]) utc_now = datetime.datetime.utcnow() time_in_queue = (utc_now - datetime_from_isoformat(next_pb[4])) if time_in_queue.total_seconds() >= 10: item = self._queue.get() LOG.info('------------------------------------') LOG.info('>>> Executing %s! <<<', item) LOG.info('------------------------------------') execute_processing_block.delay(item) self._num_pbcs += 1 else: LOG.info('Waiting for resources for %s', next_pb[2])
python
def _schedule_processing_blocks(self): LOG.info('Starting to Schedule Processing Blocks.') while True: time.sleep(0.5) if not self._queue: continue if self._num_pbcs >= self._max_pbcs: LOG.warning('Resource limit reached!') continue _inspect = Inspect(app=APP) if self._queue and _inspect.active() is not None: next_pb = self._queue[-1] LOG.info('Considering %s for execution...', next_pb[2]) utc_now = datetime.datetime.utcnow() time_in_queue = (utc_now - datetime_from_isoformat(next_pb[4])) if time_in_queue.total_seconds() >= 10: item = self._queue.get() LOG.info('------------------------------------') LOG.info('>>> Executing %s! <<<', item) LOG.info('------------------------------------') execute_processing_block.delay(item) self._num_pbcs += 1 else: LOG.info('Waiting for resources for %s', next_pb[2])
[ "def", "_schedule_processing_blocks", "(", "self", ")", ":", "LOG", ".", "info", "(", "'Starting to Schedule Processing Blocks.'", ")", "while", "True", ":", "time", ".", "sleep", "(", "0.5", ")", "if", "not", "self", ".", "_queue", ":", "continue", "if", "s...
Schedule Processing Blocks for execution.
[ "Schedule", "Processing", "Blocks", "for", "execution", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/processing_controller/scheduler/scheduler.py#L110-L135
17,332
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/processing_controller/scheduler/scheduler.py
ProcessingBlockScheduler._monitor_pbc_status
def _monitor_pbc_status(self): """Monitor the PBC status.""" LOG.info('Starting to Monitor PBC status.') inspect = celery.current_app.control.inspect() workers = inspect.ping() start_time = time.time() while workers is None: time.sleep(0.1) elapsed = time.time() - start_time if elapsed > 20.0: LOG.warning('PBC not found!') break if workers is not None: for worker in workers: _tasks = inspect.registered_tasks()[worker] LOG.info('Worker: %s tasks:', worker) for task_index, task_name in enumerate(_tasks): LOG.info(' %02d : %s', task_index, task_name) while True: LOG.info('Checking PBC status (%d/%d)', self._num_pbcs, self._max_pbcs) celery_app = celery.current_app inspect = celery_app.control.inspect() workers = inspect.ping() if workers is None: LOG.warning('PBC service not found!') else: LOG.info('PBC state: %s', celery_app.events.State()) _active = inspect.active() _scheduled = inspect.scheduled() for worker in workers: LOG.info(' Worker %s: scheduled: %s, active: %s', worker, _active[worker], _scheduled[worker]) time.sleep(self._report_interval)
python
def _monitor_pbc_status(self): LOG.info('Starting to Monitor PBC status.') inspect = celery.current_app.control.inspect() workers = inspect.ping() start_time = time.time() while workers is None: time.sleep(0.1) elapsed = time.time() - start_time if elapsed > 20.0: LOG.warning('PBC not found!') break if workers is not None: for worker in workers: _tasks = inspect.registered_tasks()[worker] LOG.info('Worker: %s tasks:', worker) for task_index, task_name in enumerate(_tasks): LOG.info(' %02d : %s', task_index, task_name) while True: LOG.info('Checking PBC status (%d/%d)', self._num_pbcs, self._max_pbcs) celery_app = celery.current_app inspect = celery_app.control.inspect() workers = inspect.ping() if workers is None: LOG.warning('PBC service not found!') else: LOG.info('PBC state: %s', celery_app.events.State()) _active = inspect.active() _scheduled = inspect.scheduled() for worker in workers: LOG.info(' Worker %s: scheduled: %s, active: %s', worker, _active[worker], _scheduled[worker]) time.sleep(self._report_interval)
[ "def", "_monitor_pbc_status", "(", "self", ")", ":", "LOG", ".", "info", "(", "'Starting to Monitor PBC status.'", ")", "inspect", "=", "celery", ".", "current_app", ".", "control", ".", "inspect", "(", ")", "workers", "=", "inspect", ".", "ping", "(", ")", ...
Monitor the PBC status.
[ "Monitor", "the", "PBC", "status", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/processing_controller/scheduler/scheduler.py#L137-L171
17,333
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/processing_controller/scheduler/scheduler.py
ProcessingBlockScheduler.start
def start(self): """Start the scheduler threads.""" # TODO(BMo) having this check is probably a good idea but I've \ # disabled it for now while the PBC is in flux. # assert sip_pbc.release.__version__ == '1.2.3' scheduler_threads = [ Thread(target=self._monitor_events, daemon=True), Thread(target=self._processing_controller_status, daemon=True), Thread(target=self._schedule_processing_blocks, daemon=True), Thread(target=self._monitor_pbc_status, daemon=True) ] for thread in scheduler_threads: thread.start() try: for thread in scheduler_threads: thread.join() except KeyboardInterrupt: LOG.info('Keyboard interrupt!') sys.exit(0) finally: LOG.info('Finally!')
python
def start(self): # TODO(BMo) having this check is probably a good idea but I've \ # disabled it for now while the PBC is in flux. # assert sip_pbc.release.__version__ == '1.2.3' scheduler_threads = [ Thread(target=self._monitor_events, daemon=True), Thread(target=self._processing_controller_status, daemon=True), Thread(target=self._schedule_processing_blocks, daemon=True), Thread(target=self._monitor_pbc_status, daemon=True) ] for thread in scheduler_threads: thread.start() try: for thread in scheduler_threads: thread.join() except KeyboardInterrupt: LOG.info('Keyboard interrupt!') sys.exit(0) finally: LOG.info('Finally!')
[ "def", "start", "(", "self", ")", ":", "# TODO(BMo) having this check is probably a good idea but I've \\", "# disabled it for now while the PBC is in flux.", "# assert sip_pbc.release.__version__ == '1.2.3'", "scheduler_threads", "=", "[", "Thread", "(", "target", "=", "self", "."...
Start the scheduler threads.
[ "Start", "the", "scheduler", "threads", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/processing_controller/scheduler/scheduler.py#L173-L196
17,334
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/master_controller/app/__main__.py
_update_service_current_state
def _update_service_current_state(service: ServiceState): """Update the current state of a service. Updates the current state of services after their target state has changed. Args: service (ServiceState): Service state object to update """ LOG.debug("Setting current state from target state for %s", service.id) service.update_current_state(service.target_state)
python
def _update_service_current_state(service: ServiceState): LOG.debug("Setting current state from target state for %s", service.id) service.update_current_state(service.target_state)
[ "def", "_update_service_current_state", "(", "service", ":", "ServiceState", ")", ":", "LOG", ".", "debug", "(", "\"Setting current state from target state for %s\"", ",", "service", ".", "id", ")", "service", ".", "update_current_state", "(", "service", ".", "target_...
Update the current state of a service. Updates the current state of services after their target state has changed. Args: service (ServiceState): Service state object to update
[ "Update", "the", "current", "state", "of", "a", "service", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/master_controller/app/__main__.py#L28-L38
17,335
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/master_controller/app/__main__.py
_update_services_instant_gratification
def _update_services_instant_gratification(sdp_target_state: str): """For demonstration purposes only. This instantly updates the services current state with the target state, rather than wait on them or schedule random delays in bringing them back up. """ service_states = get_service_state_list() # Set the target state of services for service in service_states: if service.current_state != sdp_target_state: LOG.debug('Setting the current state of %s to be %s', service.id, sdp_target_state) service.update_current_state(sdp_target_state)
python
def _update_services_instant_gratification(sdp_target_state: str): service_states = get_service_state_list() # Set the target state of services for service in service_states: if service.current_state != sdp_target_state: LOG.debug('Setting the current state of %s to be %s', service.id, sdp_target_state) service.update_current_state(sdp_target_state)
[ "def", "_update_services_instant_gratification", "(", "sdp_target_state", ":", "str", ")", ":", "service_states", "=", "get_service_state_list", "(", ")", "# Set the target state of services", "for", "service", "in", "service_states", ":", "if", "service", ".", "current_s...
For demonstration purposes only. This instantly updates the services current state with the target state, rather than wait on them or schedule random delays in bringing them back up.
[ "For", "demonstration", "purposes", "only", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/master_controller/app/__main__.py#L41-L55
17,336
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/master_controller/app/__main__.py
_update_services_target_state
def _update_services_target_state(sdp_target_state: str): """Update the target states of services based on SDP target state. When we get a new target state this function is called to ensure components receive the target state(s) and/or act on them. Args: sdp_target_state (str): Target state of SDP """ service_states = get_service_state_list() # Set the target state of services for service in service_states: if service.current_state != sdp_target_state: LOG.debug('Setting the target state of %s to be %s', service.id, sdp_target_state) service.update_target_state(sdp_target_state)
python
def _update_services_target_state(sdp_target_state: str): service_states = get_service_state_list() # Set the target state of services for service in service_states: if service.current_state != sdp_target_state: LOG.debug('Setting the target state of %s to be %s', service.id, sdp_target_state) service.update_target_state(sdp_target_state)
[ "def", "_update_services_target_state", "(", "sdp_target_state", ":", "str", ")", ":", "service_states", "=", "get_service_state_list", "(", ")", "# Set the target state of services", "for", "service", "in", "service_states", ":", "if", "service", ".", "current_state", ...
Update the target states of services based on SDP target state. When we get a new target state this function is called to ensure components receive the target state(s) and/or act on them. Args: sdp_target_state (str): Target state of SDP
[ "Update", "the", "target", "states", "of", "services", "based", "on", "SDP", "target", "state", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/master_controller/app/__main__.py#L60-L77
17,337
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/master_controller/app/__main__.py
_handle_sdp_target_state_updated
def _handle_sdp_target_state_updated(sdp_state: SDPState): """Respond to an SDP target state change event. This function sets the current state of SDP to the target state if that is possible. TODO(BMo) This cant be done as a blocking function as it is here! """ LOG.info('Handling SDP target state updated event...') LOG.info('SDP target state: %s', sdp_state.target_state) # Map between the SDP target state and the service target state? if sdp_state.target_state == 'off': _update_services_target_state('off') # TODO: Work out if the state of SDP has reached the target state. # If yes, update the current state. sdp_state.update_current_state(sdp_state.target_state)
python
def _handle_sdp_target_state_updated(sdp_state: SDPState): LOG.info('Handling SDP target state updated event...') LOG.info('SDP target state: %s', sdp_state.target_state) # Map between the SDP target state and the service target state? if sdp_state.target_state == 'off': _update_services_target_state('off') # TODO: Work out if the state of SDP has reached the target state. # If yes, update the current state. sdp_state.update_current_state(sdp_state.target_state)
[ "def", "_handle_sdp_target_state_updated", "(", "sdp_state", ":", "SDPState", ")", ":", "LOG", ".", "info", "(", "'Handling SDP target state updated event...'", ")", "LOG", ".", "info", "(", "'SDP target state: %s'", ",", "sdp_state", ".", "target_state", ")", "# Map ...
Respond to an SDP target state change event. This function sets the current state of SDP to the target state if that is possible. TODO(BMo) This cant be done as a blocking function as it is here!
[ "Respond", "to", "an", "SDP", "target", "state", "change", "event", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/master_controller/app/__main__.py#L85-L103
17,338
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/master_controller/app/__main__.py
_init
def _init(sdp_state: SDPState): """Initialise the Master Controller Service. Performs the following actions: 1. Registers ServiceState objects into the Config Db. 2. If initialising for the first time (unknown state), sets the SDPState to 'init' 3. Initialises the state of Services, if running for the first time (their state == unknown) 4. Waits some time and sets the Service states to 'on'. This emulates waiting for Services to become available. 5. Once all services are 'on', sets the SDP state to 'standby'. """ # Parse command line arguments. LOG.info("Initialising: %s", __service_id__) # FIXME(BMo) There is a bug when SDP or services 'start' in the 'off' # state. At the moment it is impossible to transition out of this. # FIXME(BMo) **Hack** Register all services or if already registered do # nothing (this is handled by the ServiceState object). _services = [ "ExecutionControl:AlarmReceiver:1.0.0", "ExecutionControl:AlertManager:1.0.0", "ExecutionControl:ConfigurationDatabase:5.0.1", "ExecutionControl:MasterController:1.3.0", "ExecutionControl:ProcessingController:1.2.6", "ExecutionControl:ProcessingBlockController:1.3.0", "TangoControl:Database:1.0.4", "TangoControl:MySQL:1.0.3", "TangoControl:SDPMaster:1.2.1", "TangoControl:Subarrays:1.2.0", "TangoControl:ProcessingBlocks:1.2.0", "Platform:Kafka:2.1.1", "Platform:Prometheus:1.0.0", "Platform:PrometheusPushGateway:0.7.0", "Platform:RedisCommander:210.0.0", "Platform:Zookeeper:3.4.13" ] for service_id in _services: subsystem, name, version = service_id.split(':') ServiceState(subsystem, name, version) # If the SDP state is 'unknown', mark the SDP state as init. # FIXME(BMo) This is not right as we want to allow for recovery from # failure without just reinitialising...!? ie. 
respect the old sate # NOTE: If the state is 'off' we will want to reset the database # with 'skasip_config_db_init --clear' if sdp_state.current_state in ['unknown', 'off']: try: LOG.info("Setting the SDPState to 'init'") sdp_state.update_current_state('init', force=True) except ValueError as error: LOG.critical('Unable to set the State of SDP to init! %s', str(error)) LOG.info("Updating Service States") service_state_list = get_service_state_list() # FIXME(BMo) **Hack** Mark all Services in the 'unknown' state as # initialising. for service_state in service_state_list: if service_state.current_state in ['unknown', 'off']: service_state.update_current_state('init', force=True) # FIXME(BMo) **Hack** After 'checking' that the services are 'on' set # their state on 'on' after a short delay. # FIXME(BMo) This check should not be serialised!!! (should be part of the # event loop) for service_state in service_state_list: if service_state.current_state == 'init': time.sleep(random.uniform(0, 0.2)) service_state.update_current_state('on') # FIXME(BMo): **Hack** Now the all services are on, set the sate of SDP to # 'standby' # FIXME(BMo) This should also be part of the event loop. services_on = [service.current_state == 'on' for service in service_state_list] if all(services_on): LOG.info('All Services are online!.') sdp_state.update_current_state('standby') else: LOG.critical('Master Controller failed to initialise.') return service_state_list
python
def _init(sdp_state: SDPState): # Parse command line arguments. LOG.info("Initialising: %s", __service_id__) # FIXME(BMo) There is a bug when SDP or services 'start' in the 'off' # state. At the moment it is impossible to transition out of this. # FIXME(BMo) **Hack** Register all services or if already registered do # nothing (this is handled by the ServiceState object). _services = [ "ExecutionControl:AlarmReceiver:1.0.0", "ExecutionControl:AlertManager:1.0.0", "ExecutionControl:ConfigurationDatabase:5.0.1", "ExecutionControl:MasterController:1.3.0", "ExecutionControl:ProcessingController:1.2.6", "ExecutionControl:ProcessingBlockController:1.3.0", "TangoControl:Database:1.0.4", "TangoControl:MySQL:1.0.3", "TangoControl:SDPMaster:1.2.1", "TangoControl:Subarrays:1.2.0", "TangoControl:ProcessingBlocks:1.2.0", "Platform:Kafka:2.1.1", "Platform:Prometheus:1.0.0", "Platform:PrometheusPushGateway:0.7.0", "Platform:RedisCommander:210.0.0", "Platform:Zookeeper:3.4.13" ] for service_id in _services: subsystem, name, version = service_id.split(':') ServiceState(subsystem, name, version) # If the SDP state is 'unknown', mark the SDP state as init. # FIXME(BMo) This is not right as we want to allow for recovery from # failure without just reinitialising...!? ie. respect the old sate # NOTE: If the state is 'off' we will want to reset the database # with 'skasip_config_db_init --clear' if sdp_state.current_state in ['unknown', 'off']: try: LOG.info("Setting the SDPState to 'init'") sdp_state.update_current_state('init', force=True) except ValueError as error: LOG.critical('Unable to set the State of SDP to init! %s', str(error)) LOG.info("Updating Service States") service_state_list = get_service_state_list() # FIXME(BMo) **Hack** Mark all Services in the 'unknown' state as # initialising. 
for service_state in service_state_list: if service_state.current_state in ['unknown', 'off']: service_state.update_current_state('init', force=True) # FIXME(BMo) **Hack** After 'checking' that the services are 'on' set # their state on 'on' after a short delay. # FIXME(BMo) This check should not be serialised!!! (should be part of the # event loop) for service_state in service_state_list: if service_state.current_state == 'init': time.sleep(random.uniform(0, 0.2)) service_state.update_current_state('on') # FIXME(BMo): **Hack** Now the all services are on, set the sate of SDP to # 'standby' # FIXME(BMo) This should also be part of the event loop. services_on = [service.current_state == 'on' for service in service_state_list] if all(services_on): LOG.info('All Services are online!.') sdp_state.update_current_state('standby') else: LOG.critical('Master Controller failed to initialise.') return service_state_list
[ "def", "_init", "(", "sdp_state", ":", "SDPState", ")", ":", "# Parse command line arguments.", "LOG", ".", "info", "(", "\"Initialising: %s\"", ",", "__service_id__", ")", "# FIXME(BMo) There is a bug when SDP or services 'start' in the 'off'", "# state. At the moment it is impo...
Initialise the Master Controller Service. Performs the following actions: 1. Registers ServiceState objects into the Config Db. 2. If initialising for the first time (unknown state), sets the SDPState to 'init' 3. Initialises the state of Services, if running for the first time (their state == unknown) 4. Waits some time and sets the Service states to 'on'. This emulates waiting for Services to become available. 5. Once all services are 'on', sets the SDP state to 'standby'.
[ "Initialise", "the", "Master", "Controller", "Service", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/master_controller/app/__main__.py#L127-L213
17,339
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/master_controller/app/__main__.py
_process_event
def _process_event(event: Event, sdp_state: SDPState, service_states: List[ServiceState]): """Process a SDP state change event.""" LOG.debug('Event detected! (id : "%s", type: "%s", data: "%s")', event.object_id, event.type, event.data) if event.object_id == 'SDP' and event.type == 'current_state_updated': LOG.info('SDP current state updated, no action required!') if event.object_id == 'SDP' and event.type == 'target_state_updated': LOG.info("SDP target state changed to '%s'", sdp_state.target_state) # If the sdp is already in the target state do nothing if sdp_state.target_state == sdp_state.current_state: LOG.warning('SDP already in %s state', sdp_state.current_state) return # Check that a transition to the target state is allowed in the # current state. if not sdp_state.is_target_state_allowed(sdp_state.target_state): LOG.error('Transition to %s is not allowed when in state %s', sdp_state.target_state, sdp_state.current_state) sdp_state.target_state = sdp_state.current_state return _update_services_target_state(sdp_state.target_state) # If asking SDP to turn off, also turn off services. if sdp_state.target_state == 'off': LOG.info('Turning off services!') for service_state in service_states: service_state.update_target_state('off') service_state.update_current_state('off') LOG.info('Processing target state change request ...') time.sleep(0.1) LOG.info('Done processing target state change request!') # Assuming that the SDP has responding to the target # target state command by now, set the current state # to the target state. sdp_state.update_current_state(sdp_state.target_state) if sdp_state.current_state == 'alarm': LOG.debug('raising SDP state alarm') SIP_STATE_ALARM.set(1) else: SIP_STATE_ALARM.set(0) try: # FIXME(BMo) the pushgateway host should not be hardcoded! push_to_gateway('platform_pushgateway:9091', job='SIP', registry=COLLECTOR_REGISTRY) except urllib.error.URLError: LOG.warning("Unable to connect to the Alarms service!")
python
def _process_event(event: Event, sdp_state: SDPState, service_states: List[ServiceState]): LOG.debug('Event detected! (id : "%s", type: "%s", data: "%s")', event.object_id, event.type, event.data) if event.object_id == 'SDP' and event.type == 'current_state_updated': LOG.info('SDP current state updated, no action required!') if event.object_id == 'SDP' and event.type == 'target_state_updated': LOG.info("SDP target state changed to '%s'", sdp_state.target_state) # If the sdp is already in the target state do nothing if sdp_state.target_state == sdp_state.current_state: LOG.warning('SDP already in %s state', sdp_state.current_state) return # Check that a transition to the target state is allowed in the # current state. if not sdp_state.is_target_state_allowed(sdp_state.target_state): LOG.error('Transition to %s is not allowed when in state %s', sdp_state.target_state, sdp_state.current_state) sdp_state.target_state = sdp_state.current_state return _update_services_target_state(sdp_state.target_state) # If asking SDP to turn off, also turn off services. if sdp_state.target_state == 'off': LOG.info('Turning off services!') for service_state in service_states: service_state.update_target_state('off') service_state.update_current_state('off') LOG.info('Processing target state change request ...') time.sleep(0.1) LOG.info('Done processing target state change request!') # Assuming that the SDP has responding to the target # target state command by now, set the current state # to the target state. sdp_state.update_current_state(sdp_state.target_state) if sdp_state.current_state == 'alarm': LOG.debug('raising SDP state alarm') SIP_STATE_ALARM.set(1) else: SIP_STATE_ALARM.set(0) try: # FIXME(BMo) the pushgateway host should not be hardcoded! push_to_gateway('platform_pushgateway:9091', job='SIP', registry=COLLECTOR_REGISTRY) except urllib.error.URLError: LOG.warning("Unable to connect to the Alarms service!")
[ "def", "_process_event", "(", "event", ":", "Event", ",", "sdp_state", ":", "SDPState", ",", "service_states", ":", "List", "[", "ServiceState", "]", ")", ":", "LOG", ".", "debug", "(", "'Event detected! (id : \"%s\", type: \"%s\", data: \"%s\")'", ",", "event", "...
Process a SDP state change event.
[ "Process", "a", "SDP", "state", "change", "event", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/master_controller/app/__main__.py#L216-L271
17,340
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/master_controller/app/__main__.py
_process_state_change_events
def _process_state_change_events(): """Process events relating to the overall state of SDP. This function starts and event loop which continually checks for and responds to SDP state change events. """ sdp_state = SDPState() service_states = get_service_state_list() state_events = sdp_state.get_event_queue(subscriber=__service_name__) state_is_off = sdp_state.current_state == 'off' counter = 0 while True: time.sleep(0.1) if not state_is_off: # *Hack* to avoid problems with historical events not being # correctly handled by EventQueue.get(), replay old events every # 10s # - see issue #54 if counter % 1000 == 0: LOG.debug('Checking published events ... %d', counter / 1000) _published_events = state_events.get_published_events( process=True) for _state_event in _published_events: _process_event(_state_event, sdp_state, service_states) else: _state_event = state_events.get() if _state_event: _process_event(_state_event, sdp_state, service_states) state_is_off = sdp_state.current_state == 'off' counter += 1
python
def _process_state_change_events(): sdp_state = SDPState() service_states = get_service_state_list() state_events = sdp_state.get_event_queue(subscriber=__service_name__) state_is_off = sdp_state.current_state == 'off' counter = 0 while True: time.sleep(0.1) if not state_is_off: # *Hack* to avoid problems with historical events not being # correctly handled by EventQueue.get(), replay old events every # 10s # - see issue #54 if counter % 1000 == 0: LOG.debug('Checking published events ... %d', counter / 1000) _published_events = state_events.get_published_events( process=True) for _state_event in _published_events: _process_event(_state_event, sdp_state, service_states) else: _state_event = state_events.get() if _state_event: _process_event(_state_event, sdp_state, service_states) state_is_off = sdp_state.current_state == 'off' counter += 1
[ "def", "_process_state_change_events", "(", ")", ":", "sdp_state", "=", "SDPState", "(", ")", "service_states", "=", "get_service_state_list", "(", ")", "state_events", "=", "sdp_state", ".", "get_event_queue", "(", "subscriber", "=", "__service_name__", ")", "state...
Process events relating to the overall state of SDP. This function starts and event loop which continually checks for and responds to SDP state change events.
[ "Process", "events", "relating", "to", "the", "overall", "state", "of", "SDP", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/master_controller/app/__main__.py#L278-L309
17,341
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/master_controller/app/__main__.py
main
def main(): """Merge temp_main and main.""" # Parse command line args. _parse_args() LOG.info("Starting: %s", __service_id__) # Subscribe to state change events. # FIXME(BMo) This API is unfortunate as it looks like we are only # subscribing to sdp_state events. LOG.info('Subscribing to state change events (subscriber = %s)', __service_name__) sdp_state = SDPState() _ = sdp_state.subscribe(subscriber=__service_name__) # Initialise the service. _ = _init(sdp_state) LOG.info('Finished initialising!') # Enter a pseudo event-loop (using Sched) to monitor for state change # events # (Also random set services into a fault or alarm state if enabled) LOG.info('Responding to state change events ...') try: _process_state_change_events() except ValueError as error: LOG.critical('Value error: %s', str(error)) except KeyboardInterrupt as err: LOG.debug('Keyboard Interrupt %s', err) LOG.info('Exiting!')
python
def main(): # Parse command line args. _parse_args() LOG.info("Starting: %s", __service_id__) # Subscribe to state change events. # FIXME(BMo) This API is unfortunate as it looks like we are only # subscribing to sdp_state events. LOG.info('Subscribing to state change events (subscriber = %s)', __service_name__) sdp_state = SDPState() _ = sdp_state.subscribe(subscriber=__service_name__) # Initialise the service. _ = _init(sdp_state) LOG.info('Finished initialising!') # Enter a pseudo event-loop (using Sched) to monitor for state change # events # (Also random set services into a fault or alarm state if enabled) LOG.info('Responding to state change events ...') try: _process_state_change_events() except ValueError as error: LOG.critical('Value error: %s', str(error)) except KeyboardInterrupt as err: LOG.debug('Keyboard Interrupt %s', err) LOG.info('Exiting!')
[ "def", "main", "(", ")", ":", "# Parse command line args.", "_parse_args", "(", ")", "LOG", ".", "info", "(", "\"Starting: %s\"", ",", "__service_id__", ")", "# Subscribe to state change events.", "# FIXME(BMo) This API is unfortunate as it looks like we are only", "# subscribi...
Merge temp_main and main.
[ "Merge", "temp_main", "and", "main", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/master_controller/app/__main__.py#L312-L340
17,342
SKA-ScienceDataProcessor/integration-prototype
sip/science_pipeline_workflows/ingest_visibilities/recv_c/send.py
main
def main(): """Runs the test sender.""" stream_config = spead2.send.StreamConfig( max_packet_size=16356, rate=1000e6, burst_size=10, max_heaps=1) item_group = spead2.send.ItemGroup(flavour=spead2.Flavour(4, 64, 48, 0)) # Add item descriptors to the heap. num_baselines = (512 * 513) // 2 dtype = [('TCI', 'i1'), ('FD', 'u1'), ('VIS', '<c8', 4)] item_group.add_item( id=0x6000, name='visibility_timestamp_count', description='', shape=tuple(), format=None, dtype='<u4') item_group.add_item( id=0x6001, name='visibility_timestamp_fraction', description='', shape=tuple(), format=None, dtype='<u4') item_group.add_item( id=0x6005, name='visibility_baseline_count', description='', shape=tuple(), format=None, dtype='<u4') item_group.add_item( id=0x6008, name='scan_id', description='', shape=tuple(), format=None, dtype='<u8') item_group.add_item( id=0x600A, name='correlator_output_data', description='', shape=(num_baselines,), dtype=dtype) # Create streams and send start-of-stream message. streams = [] num_streams = 2 for i in range(num_streams): stream = spead2.send.UdpStream( thread_pool=spead2.ThreadPool(threads=1), hostname='127.0.0.1', port=41000 + i, config=stream_config) stream.send_heap(item_group.get_start()) streams.append(stream) vis = numpy.zeros(shape=(num_baselines,), dtype=dtype) num_heaps = 200 start_time = time.time() for stream in streams: # Update values in the heap. item_group['visibility_timestamp_count'].value = 1 item_group['visibility_timestamp_fraction'].value = 0 item_group['visibility_baseline_count'].value = num_baselines item_group['scan_id'].value = 100000000 item_group['correlator_output_data'].value = vis # Iterate heaps. for i in range(num_heaps): # Send heap. stream.send_heap(item_group.get_heap(descriptors='all', data='all')) # Print time taken. 
duration = time.time() - start_time data_size = num_streams * num_heaps * (vis.nbytes / 1e6) print("Sent %.3f MB in %.3f sec (%.3f MB/sec)" % ( data_size, duration, (data_size/duration))) # Send end-of-stream message. for stream in streams: stream.send_heap(item_group.get_end())
python
def main(): stream_config = spead2.send.StreamConfig( max_packet_size=16356, rate=1000e6, burst_size=10, max_heaps=1) item_group = spead2.send.ItemGroup(flavour=spead2.Flavour(4, 64, 48, 0)) # Add item descriptors to the heap. num_baselines = (512 * 513) // 2 dtype = [('TCI', 'i1'), ('FD', 'u1'), ('VIS', '<c8', 4)] item_group.add_item( id=0x6000, name='visibility_timestamp_count', description='', shape=tuple(), format=None, dtype='<u4') item_group.add_item( id=0x6001, name='visibility_timestamp_fraction', description='', shape=tuple(), format=None, dtype='<u4') item_group.add_item( id=0x6005, name='visibility_baseline_count', description='', shape=tuple(), format=None, dtype='<u4') item_group.add_item( id=0x6008, name='scan_id', description='', shape=tuple(), format=None, dtype='<u8') item_group.add_item( id=0x600A, name='correlator_output_data', description='', shape=(num_baselines,), dtype=dtype) # Create streams and send start-of-stream message. streams = [] num_streams = 2 for i in range(num_streams): stream = spead2.send.UdpStream( thread_pool=spead2.ThreadPool(threads=1), hostname='127.0.0.1', port=41000 + i, config=stream_config) stream.send_heap(item_group.get_start()) streams.append(stream) vis = numpy.zeros(shape=(num_baselines,), dtype=dtype) num_heaps = 200 start_time = time.time() for stream in streams: # Update values in the heap. item_group['visibility_timestamp_count'].value = 1 item_group['visibility_timestamp_fraction'].value = 0 item_group['visibility_baseline_count'].value = num_baselines item_group['scan_id'].value = 100000000 item_group['correlator_output_data'].value = vis # Iterate heaps. for i in range(num_heaps): # Send heap. stream.send_heap(item_group.get_heap(descriptors='all', data='all')) # Print time taken. duration = time.time() - start_time data_size = num_streams * num_heaps * (vis.nbytes / 1e6) print("Sent %.3f MB in %.3f sec (%.3f MB/sec)" % ( data_size, duration, (data_size/duration))) # Send end-of-stream message. 
for stream in streams: stream.send_heap(item_group.get_end())
[ "def", "main", "(", ")", ":", "stream_config", "=", "spead2", ".", "send", ".", "StreamConfig", "(", "max_packet_size", "=", "16356", ",", "rate", "=", "1000e6", ",", "burst_size", "=", "10", ",", "max_heaps", "=", "1", ")", "item_group", "=", "spead2", ...
Runs the test sender.
[ "Runs", "the", "test", "sender", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/science_pipeline_workflows/ingest_visibilities/recv_c/send.py#L11-L69
17,343
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/_events/event.py
Event.from_config
def from_config(cls, config: dict): """Create an event object from an event dictionary object. Args: config (dict): Event Configuration dictionary. """ timestamp = config.get('timestamp', None) return cls(config.get('id'), config.get('type'), config.get('data', dict()), config.get('origin', None), timestamp, config.get('object_type', None), config.get('object_id', None), config.get('object_key', None))
python
def from_config(cls, config: dict): timestamp = config.get('timestamp', None) return cls(config.get('id'), config.get('type'), config.get('data', dict()), config.get('origin', None), timestamp, config.get('object_type', None), config.get('object_id', None), config.get('object_key', None))
[ "def", "from_config", "(", "cls", ",", "config", ":", "dict", ")", ":", "timestamp", "=", "config", ".", "get", "(", "'timestamp'", ",", "None", ")", "return", "cls", "(", "config", ".", "get", "(", "'id'", ")", ",", "config", ".", "get", "(", "'ty...
Create an event object from an event dictionary object. Args: config (dict): Event Configuration dictionary.
[ "Create", "an", "event", "object", "from", "an", "event", "dictionary", "object", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/_events/event.py#L45-L60
17,344
SKA-ScienceDataProcessor/integration-prototype
sip/science_pipeline_workflows/example_imager_mpi/example_mpi_imager.py
process_input_data
def process_input_data(filename, imager, grid_data, grid_norm, grid_weights): """Reads visibility data from a Measurement Set. The visibility grid or weights grid is updated accordingly. Visibility data are read from disk in blocks of size num_baselines. Args: filename (str): Name of Measurement Set to open. imager (oskar.Imager): Handle to configured imager. grid_data (numpy.ndarray or None): Visibility grid to populate. grid_norm (float) Current grid normalisation. grid_weights (numpy.ndarray): Weights grid to populate or read. Returns: grid_norm (float): Updated grid normalisation. """ # Get data from the input Measurement Set. ms = oskar.MeasurementSet.open(filename) block_start = 0 num_rows = ms.num_rows num_baselines = ms.num_stations * (ms.num_stations - 1) // 2 # Loop over data blocks of size num_baselines. while block_start < num_rows: block_size = num_rows - block_start if block_size > num_baselines: block_size = num_baselines # Get the baseline coordinates. (Replace this with a query to LTS.) uvw = ms.read_column('UVW', block_start, block_size) # Read the Stokes-I visibility weights. vis_weights = ms.read_column('WEIGHT', block_start, block_size) if ms.num_pols == 4: vis_weights = 0.5 * (vis_weights[:, 0] + vis_weights[:, 3]) # Loop over frequency channels. # (We expect there to be only one channel here, but loop just in case.) for j in range(ms.num_channels): # Get coordinates in wavelengths. coords = uvw * (ms.freq_start_hz + j * ms.freq_inc_hz) / 299792458. # Get the Stokes-I visibilities for this channel. vis_data = None if not imager.coords_only: vis_data = ms.read_vis(block_start, j, 1, block_size) if ms.num_pols == 4: vis_data = 0.5 * (vis_data[0, :, 0] + vis_data[0, :, 3]) # Update the grid plane with this visibility block. grid_norm = imager.update_plane( coords[:, 0], coords[:, 1], coords[:, 2], vis_data, vis_weights, grid_data, grid_norm, grid_weights) # Increment start row by block size. 
block_start += block_size # Return updated grid normalisation. return grid_norm
python
def process_input_data(filename, imager, grid_data, grid_norm, grid_weights): # Get data from the input Measurement Set. ms = oskar.MeasurementSet.open(filename) block_start = 0 num_rows = ms.num_rows num_baselines = ms.num_stations * (ms.num_stations - 1) // 2 # Loop over data blocks of size num_baselines. while block_start < num_rows: block_size = num_rows - block_start if block_size > num_baselines: block_size = num_baselines # Get the baseline coordinates. (Replace this with a query to LTS.) uvw = ms.read_column('UVW', block_start, block_size) # Read the Stokes-I visibility weights. vis_weights = ms.read_column('WEIGHT', block_start, block_size) if ms.num_pols == 4: vis_weights = 0.5 * (vis_weights[:, 0] + vis_weights[:, 3]) # Loop over frequency channels. # (We expect there to be only one channel here, but loop just in case.) for j in range(ms.num_channels): # Get coordinates in wavelengths. coords = uvw * (ms.freq_start_hz + j * ms.freq_inc_hz) / 299792458. # Get the Stokes-I visibilities for this channel. vis_data = None if not imager.coords_only: vis_data = ms.read_vis(block_start, j, 1, block_size) if ms.num_pols == 4: vis_data = 0.5 * (vis_data[0, :, 0] + vis_data[0, :, 3]) # Update the grid plane with this visibility block. grid_norm = imager.update_plane( coords[:, 0], coords[:, 1], coords[:, 2], vis_data, vis_weights, grid_data, grid_norm, grid_weights) # Increment start row by block size. block_start += block_size # Return updated grid normalisation. return grid_norm
[ "def", "process_input_data", "(", "filename", ",", "imager", ",", "grid_data", ",", "grid_norm", ",", "grid_weights", ")", ":", "# Get data from the input Measurement Set.", "ms", "=", "oskar", ".", "MeasurementSet", ".", "open", "(", "filename", ")", "block_start",...
Reads visibility data from a Measurement Set. The visibility grid or weights grid is updated accordingly. Visibility data are read from disk in blocks of size num_baselines. Args: filename (str): Name of Measurement Set to open. imager (oskar.Imager): Handle to configured imager. grid_data (numpy.ndarray or None): Visibility grid to populate. grid_norm (float) Current grid normalisation. grid_weights (numpy.ndarray): Weights grid to populate or read. Returns: grid_norm (float): Updated grid normalisation.
[ "Reads", "visibility", "data", "from", "a", "Measurement", "Set", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/science_pipeline_workflows/example_imager_mpi/example_mpi_imager.py#L34-L93
17,345
SKA-ScienceDataProcessor/integration-prototype
sip/examples/flask_processing_controller/app/api/scheduling_block_list.py
get
def get(): """Return list of Scheduling Blocks Instances known to SDP .""" LOG.debug('GET list of SBIs.') # Construct response object. _url = get_root_url() response = dict(scheduling_blocks=[], links=dict(home='{}'.format(_url))) # Get ordered list of SBI ID's. block_ids = DB.get_sched_block_instance_ids() # Loop over SBIs and add summary of each to the list of SBIs in the # response. for block in DB.get_block_details(block_ids): block_id = block['id'] LOG.debug('Adding SBI %s to list', block_id) LOG.debug(block) block['num_processing_blocks'] = len(block['processing_block_ids']) temp = ['OK'] * 10 + ['WAITING'] * 4 + ['FAILED'] * 2 block['status'] = choice(temp) try: del block['processing_block_ids'] except KeyError: pass block['links'] = { 'detail': '{}/scheduling-block/{}' .format(_url, block_id) } response['scheduling_blocks'].append(block) return response, HTTPStatus.OK
python
def get(): LOG.debug('GET list of SBIs.') # Construct response object. _url = get_root_url() response = dict(scheduling_blocks=[], links=dict(home='{}'.format(_url))) # Get ordered list of SBI ID's. block_ids = DB.get_sched_block_instance_ids() # Loop over SBIs and add summary of each to the list of SBIs in the # response. for block in DB.get_block_details(block_ids): block_id = block['id'] LOG.debug('Adding SBI %s to list', block_id) LOG.debug(block) block['num_processing_blocks'] = len(block['processing_block_ids']) temp = ['OK'] * 10 + ['WAITING'] * 4 + ['FAILED'] * 2 block['status'] = choice(temp) try: del block['processing_block_ids'] except KeyError: pass block['links'] = { 'detail': '{}/scheduling-block/{}' .format(_url, block_id) } response['scheduling_blocks'].append(block) return response, HTTPStatus.OK
[ "def", "get", "(", ")", ":", "LOG", ".", "debug", "(", "'GET list of SBIs.'", ")", "# Construct response object.", "_url", "=", "get_root_url", "(", ")", "response", "=", "dict", "(", "scheduling_blocks", "=", "[", "]", ",", "links", "=", "dict", "(", "hom...
Return list of Scheduling Blocks Instances known to SDP .
[ "Return", "list", "of", "Scheduling", "Blocks", "Instances", "known", "to", "SDP", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/examples/flask_processing_controller/app/api/scheduling_block_list.py#L20-L51
17,346
SKA-ScienceDataProcessor/integration-prototype
sip/examples/flask_processing_controller/app/api/scheduling_block_list.py
get_table
def get_table(): """Provides table of scheduling block instance metadata for use with AJAX tables""" response = dict(blocks=[]) block_ids = DB.get_sched_block_instance_ids() for index, block_id in enumerate(block_ids): block = DB.get_block_details([block_id]).__next__() info = [ index, block['id'], block['sub_array_id'], len(block['processing_blocks']) ] response['blocks'].append(info) return response, HTTPStatus.OK
python
def get_table(): response = dict(blocks=[]) block_ids = DB.get_sched_block_instance_ids() for index, block_id in enumerate(block_ids): block = DB.get_block_details([block_id]).__next__() info = [ index, block['id'], block['sub_array_id'], len(block['processing_blocks']) ] response['blocks'].append(info) return response, HTTPStatus.OK
[ "def", "get_table", "(", ")", ":", "response", "=", "dict", "(", "blocks", "=", "[", "]", ")", "block_ids", "=", "DB", ".", "get_sched_block_instance_ids", "(", ")", "for", "index", ",", "block_id", "in", "enumerate", "(", "block_ids", ")", ":", "block",...
Provides table of scheduling block instance metadata for use with AJAX tables
[ "Provides", "table", "of", "scheduling", "block", "instance", "metadata", "for", "use", "with", "AJAX", "tables" ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/examples/flask_processing_controller/app/api/scheduling_block_list.py#L64-L78
17,347
SKA-ScienceDataProcessor/integration-prototype
sip/examples/flask_processing_controller/app/old.db/config_db_redis.py
ConfigDB.set_value
def set_value(self, key, field, value): """Add the state of the key and field""" self._db.hset(key, field, value)
python
def set_value(self, key, field, value): self._db.hset(key, field, value)
[ "def", "set_value", "(", "self", ",", "key", ",", "field", ",", "value", ")", ":", "self", ".", "_db", ".", "hset", "(", "key", ",", "field", ",", "value", ")" ]
Add the state of the key and field
[ "Add", "the", "state", "of", "the", "key", "and", "field" ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/examples/flask_processing_controller/app/old.db/config_db_redis.py#L49-L51
17,348
SKA-ScienceDataProcessor/integration-prototype
sip/examples/flask_processing_controller/app/old.db/config_db_redis.py
ConfigDB.push_event
def push_event(self, event_name, event_type, block_id): """Push inserts all the specified values at the tail of the list stored at the key""" self._db.rpush(event_name, dict(type=event_type, id=block_id))
python
def push_event(self, event_name, event_type, block_id): self._db.rpush(event_name, dict(type=event_type, id=block_id))
[ "def", "push_event", "(", "self", ",", "event_name", ",", "event_type", ",", "block_id", ")", ":", "self", ".", "_db", ".", "rpush", "(", "event_name", ",", "dict", "(", "type", "=", "event_type", ",", "id", "=", "block_id", ")", ")" ]
Push inserts all the specified values at the tail of the list stored at the key
[ "Push", "inserts", "all", "the", "specified", "values", "at", "the", "tail", "of", "the", "list", "stored", "at", "the", "key" ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/examples/flask_processing_controller/app/old.db/config_db_redis.py#L98-L101
17,349
SKA-ScienceDataProcessor/integration-prototype
sip/science_pipeline_workflows/receive_pss/pulsar_search_task.py
main
def main(): """Task run method.""" # Install handler to respond to SIGTERM signal.signal(signal.SIGTERM, _sig_handler) with open(sys.argv[1]) as fh: config = json.load(fh) # Starts the pulsar search ftp server os.chdir(os.path.expanduser('~')) receiver = PulsarStart(config, logging.getLogger()) receiver.run()
python
def main(): # Install handler to respond to SIGTERM signal.signal(signal.SIGTERM, _sig_handler) with open(sys.argv[1]) as fh: config = json.load(fh) # Starts the pulsar search ftp server os.chdir(os.path.expanduser('~')) receiver = PulsarStart(config, logging.getLogger()) receiver.run()
[ "def", "main", "(", ")", ":", "# Install handler to respond to SIGTERM", "signal", ".", "signal", "(", "signal", ".", "SIGTERM", ",", "_sig_handler", ")", "with", "open", "(", "sys", ".", "argv", "[", "1", "]", ")", "as", "fh", ":", "config", "=", "json"...
Task run method.
[ "Task", "run", "method", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/science_pipeline_workflows/receive_pss/pulsar_search_task.py#L22-L33
17,350
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py
check_connection
def check_connection(func): """Check connection exceptions.""" @wraps(func) def with_exception_handling(*args, **kwargs): """Wrap function being decorated.""" try: return func(*args, **kwargs) except redis.exceptions.ConnectionError: raise ConnectionError("Unable to connect to the Redis " "Configuration Database. host = {}, " "port = {}, id = {}." .format(REDIS_HOST, REDIS_PORT, REDIS_DB_ID)) return with_exception_handling
python
def check_connection(func): @wraps(func) def with_exception_handling(*args, **kwargs): """Wrap function being decorated.""" try: return func(*args, **kwargs) except redis.exceptions.ConnectionError: raise ConnectionError("Unable to connect to the Redis " "Configuration Database. host = {}, " "port = {}, id = {}." .format(REDIS_HOST, REDIS_PORT, REDIS_DB_ID)) return with_exception_handling
[ "def", "check_connection", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "with_exception_handling", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Wrap function being decorated.\"\"\"", "try", ":", "return", "func", "(", "*", "ar...
Check connection exceptions.
[ "Check", "connection", "exceptions", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py#L19-L32
17,351
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py
ConfigDb.save_dict
def save_dict(self, key: str, my_dict: dict, hierarchical: bool = False): """Store the specified dictionary at the specified key.""" for _key, _value in my_dict.items(): if isinstance(_value, dict): if not hierarchical: self._db.hmset(key, {_key: json.dumps(_value)}) else: self.save_dict(key + ':' + _key, _value, hierarchical) elif isinstance(_value, list): if not hierarchical: self._db.hmset(key, {_key: str(_value)}) else: print('saving list at ', key + ':' + _key) self._db.lpush(key + ':' + _key, *_value[::-1]) elif isinstance(_value, bool): self._db.hmset(key, {_key: str(_value)}) else: self._db.hmset(key, {_key: _value})
python
def save_dict(self, key: str, my_dict: dict, hierarchical: bool = False): for _key, _value in my_dict.items(): if isinstance(_value, dict): if not hierarchical: self._db.hmset(key, {_key: json.dumps(_value)}) else: self.save_dict(key + ':' + _key, _value, hierarchical) elif isinstance(_value, list): if not hierarchical: self._db.hmset(key, {_key: str(_value)}) else: print('saving list at ', key + ':' + _key) self._db.lpush(key + ':' + _key, *_value[::-1]) elif isinstance(_value, bool): self._db.hmset(key, {_key: str(_value)}) else: self._db.hmset(key, {_key: _value})
[ "def", "save_dict", "(", "self", ",", "key", ":", "str", ",", "my_dict", ":", "dict", ",", "hierarchical", ":", "bool", "=", "False", ")", ":", "for", "_key", ",", "_value", "in", "my_dict", ".", "items", "(", ")", ":", "if", "isinstance", "(", "_v...
Store the specified dictionary at the specified key.
[ "Store", "the", "specified", "dictionary", "at", "the", "specified", "key", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py#L74-L91
17,352
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py
ConfigDb._build_dict
def _build_dict(my_dict, keys, values): """Build a dictionary from a set of redis hashes. keys = ['a', 'b', 'c'] values = {'value': 'foo'} my_dict = {'a': {'b': {'c': {'value': 'foo'}}}} Args: my_dict (dict): Dictionary to add to keys (list[str]): List of keys used to define hierarchy in my_dict values (dict): Values to add at to the dictionary at the key specified by keys Returns: dict, new dictionary with values added at keys """ temp = my_dict for depth, key in enumerate(keys): if depth < len(keys) - 1: if key not in temp: temp[key] = dict() temp = temp[key] else: if key not in temp: temp[key] = values else: temp[key] = {**temp[key], **values} return my_dict
python
def _build_dict(my_dict, keys, values): temp = my_dict for depth, key in enumerate(keys): if depth < len(keys) - 1: if key not in temp: temp[key] = dict() temp = temp[key] else: if key not in temp: temp[key] = values else: temp[key] = {**temp[key], **values} return my_dict
[ "def", "_build_dict", "(", "my_dict", ",", "keys", ",", "values", ")", ":", "temp", "=", "my_dict", "for", "depth", ",", "key", "in", "enumerate", "(", "keys", ")", ":", "if", "depth", "<", "len", "(", "keys", ")", "-", "1", ":", "if", "key", "no...
Build a dictionary from a set of redis hashes. keys = ['a', 'b', 'c'] values = {'value': 'foo'} my_dict = {'a': {'b': {'c': {'value': 'foo'}}}} Args: my_dict (dict): Dictionary to add to keys (list[str]): List of keys used to define hierarchy in my_dict values (dict): Values to add at to the dictionary at the key specified by keys Returns: dict, new dictionary with values added at keys
[ "Build", "a", "dictionary", "from", "a", "set", "of", "redis", "hashes", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py#L94-L122
17,353
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py
ConfigDb._load_values
def _load_values(self, db_key: str) -> dict: """Load values from the db at the specified key, db_key. FIXME(BMo): Could also be extended to load scalar types (instead of just list and hash) """ if self._db.type(db_key) == 'list': db_values = self._db.lrange(db_key, 0, -1) for i, value in enumerate(db_values): try: db_values[i] = ast.literal_eval(value) except SyntaxError: pass except ValueError: pass else: # self._db.type == 'hash' db_values = self._db.hgetall(db_key) for _key, _value in db_values.items(): try: db_values[_key] = ast.literal_eval(_value) except SyntaxError: pass except ValueError: pass return db_values
python
def _load_values(self, db_key: str) -> dict: if self._db.type(db_key) == 'list': db_values = self._db.lrange(db_key, 0, -1) for i, value in enumerate(db_values): try: db_values[i] = ast.literal_eval(value) except SyntaxError: pass except ValueError: pass else: # self._db.type == 'hash' db_values = self._db.hgetall(db_key) for _key, _value in db_values.items(): try: db_values[_key] = ast.literal_eval(_value) except SyntaxError: pass except ValueError: pass return db_values
[ "def", "_load_values", "(", "self", ",", "db_key", ":", "str", ")", "->", "dict", ":", "if", "self", ".", "_db", ".", "type", "(", "db_key", ")", "==", "'list'", ":", "db_values", "=", "self", ".", "_db", ".", "lrange", "(", "db_key", ",", "0", "...
Load values from the db at the specified key, db_key. FIXME(BMo): Could also be extended to load scalar types (instead of just list and hash)
[ "Load", "values", "from", "the", "db", "at", "the", "specified", "key", "db_key", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py#L124-L149
17,354
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py
ConfigDb._load_dict_hierarchical
def _load_dict_hierarchical(self, db_key: str) -> dict: """Load a dictionary stored hierarchically at db_key.""" db_keys = self._db.keys(pattern=db_key + '*') my_dict = {} for _db_key in db_keys: if self._db.type(_db_key) == 'list': db_values = self._db.lrange(_db_key, 0, -1) for i, value in enumerate(db_values): try: db_values[i] = ast.literal_eval(value) except SyntaxError: pass except ValueError: pass else: # self._db.type == 'hash' db_values = self._db.hgetall(_db_key) for _key, _value in db_values.items(): try: db_values[_key] = ast.literal_eval(_value) except SyntaxError: pass except ValueError: pass my_dict = self._build_dict(my_dict, _db_key.split(':'), db_values) return my_dict[db_key]
python
def _load_dict_hierarchical(self, db_key: str) -> dict: db_keys = self._db.keys(pattern=db_key + '*') my_dict = {} for _db_key in db_keys: if self._db.type(_db_key) == 'list': db_values = self._db.lrange(_db_key, 0, -1) for i, value in enumerate(db_values): try: db_values[i] = ast.literal_eval(value) except SyntaxError: pass except ValueError: pass else: # self._db.type == 'hash' db_values = self._db.hgetall(_db_key) for _key, _value in db_values.items(): try: db_values[_key] = ast.literal_eval(_value) except SyntaxError: pass except ValueError: pass my_dict = self._build_dict(my_dict, _db_key.split(':'), db_values) return my_dict[db_key]
[ "def", "_load_dict_hierarchical", "(", "self", ",", "db_key", ":", "str", ")", "->", "dict", ":", "db_keys", "=", "self", ".", "_db", ".", "keys", "(", "pattern", "=", "db_key", "+", "'*'", ")", "my_dict", "=", "{", "}", "for", "_db_key", "in", "db_k...
Load a dictionary stored hierarchically at db_key.
[ "Load", "a", "dictionary", "stored", "hierarchically", "at", "db_key", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py#L152-L177
17,355
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py
ConfigDb.load_dict
def load_dict(self, db_key: str, hierarchical: bool = False) -> dict: """Load the dictionary at the specified key. Hierarchically stored dictionaries use a ':' separator to expand the dictionary into a set of Redis hashes. Args: db_key (str): Key at which the dictionary is stored in the db. hierarchical (bool): If True, expect the dictionary to have been stored hierarchically. If False, expect the dictionary to have been stored flat. Returns: dict, the dictionary stored at key """ if not hierarchical: db_values = self._db.hgetall(db_key) for _key, _value in db_values.items(): if isinstance(_value, str): db_values[_key] = ast.literal_eval(_value) my_dict = db_values else: my_dict = self._load_dict_hierarchical(db_key) return my_dict
python
def load_dict(self, db_key: str, hierarchical: bool = False) -> dict: if not hierarchical: db_values = self._db.hgetall(db_key) for _key, _value in db_values.items(): if isinstance(_value, str): db_values[_key] = ast.literal_eval(_value) my_dict = db_values else: my_dict = self._load_dict_hierarchical(db_key) return my_dict
[ "def", "load_dict", "(", "self", ",", "db_key", ":", "str", ",", "hierarchical", ":", "bool", "=", "False", ")", "->", "dict", ":", "if", "not", "hierarchical", ":", "db_values", "=", "self", ".", "_db", ".", "hgetall", "(", "db_key", ")", "for", "_k...
Load the dictionary at the specified key. Hierarchically stored dictionaries use a ':' separator to expand the dictionary into a set of Redis hashes. Args: db_key (str): Key at which the dictionary is stored in the db. hierarchical (bool): If True, expect the dictionary to have been stored hierarchically. If False, expect the dictionary to have been stored flat. Returns: dict, the dictionary stored at key
[ "Load", "the", "dictionary", "at", "the", "specified", "key", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py#L180-L204
17,356
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py
ConfigDb.load_dict_values
def load_dict_values(self, db_key: str, dict_keys: List[str], hierarchical: bool = False) -> List: """Load values from a dictionary with the specified dict_keys. Args: db_key (str): Key where the dictionary is stored dict_keys (List[str]): Keys within the dictionary to load. hierarchical (bool): If True, expect the dictionary to have been stored hierarchically. If False, expect the dictionary to have been stored flat. Returns: object: The value stored at dict_key in the dictionary stored at key """ result = [] if not hierarchical: _values = self._db.hmget(db_key, *dict_keys) result = [ast.literal_eval(_value) for _value in _values] else: # Get all keys in the set of keys for this dict 'db_key' db_keys = self._db.keys(pattern=db_key + '*') for _db_key in db_keys: # Check if one of the dict_keys is an entire sub-dict entry for name in _db_key.split(':')[1:]: if name in dict_keys: _values = self._load_values(_db_key) result.append(_values) # Look in the sub-dict for any of the dict_keys _values = self._db.hmget(_db_key, *dict_keys) for i, value in enumerate(_values): try: _values[i] = ast.literal_eval(value) except SyntaxError: pass except ValueError: pass result += [value for value in _values if value is not None] return result
python
def load_dict_values(self, db_key: str, dict_keys: List[str], hierarchical: bool = False) -> List: result = [] if not hierarchical: _values = self._db.hmget(db_key, *dict_keys) result = [ast.literal_eval(_value) for _value in _values] else: # Get all keys in the set of keys for this dict 'db_key' db_keys = self._db.keys(pattern=db_key + '*') for _db_key in db_keys: # Check if one of the dict_keys is an entire sub-dict entry for name in _db_key.split(':')[1:]: if name in dict_keys: _values = self._load_values(_db_key) result.append(_values) # Look in the sub-dict for any of the dict_keys _values = self._db.hmget(_db_key, *dict_keys) for i, value in enumerate(_values): try: _values[i] = ast.literal_eval(value) except SyntaxError: pass except ValueError: pass result += [value for value in _values if value is not None] return result
[ "def", "load_dict_values", "(", "self", ",", "db_key", ":", "str", ",", "dict_keys", ":", "List", "[", "str", "]", ",", "hierarchical", ":", "bool", "=", "False", ")", "->", "List", ":", "result", "=", "[", "]", "if", "not", "hierarchical", ":", "_va...
Load values from a dictionary with the specified dict_keys. Args: db_key (str): Key where the dictionary is stored dict_keys (List[str]): Keys within the dictionary to load. hierarchical (bool): If True, expect the dictionary to have been stored hierarchically. If False, expect the dictionary to have been stored flat. Returns: object: The value stored at dict_key in the dictionary stored at key
[ "Load", "values", "from", "a", "dictionary", "with", "the", "specified", "dict_keys", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py#L206-L247
17,357
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py
ConfigDb.set_hash_value
def set_hash_value(self, key, field, value, pipeline=False): """Set the value of field in a hash stored at key. Args: key (str): key (name) of the hash field (str): Field within the hash to set value: Value to set pipeline (bool): True, start a transaction block. Default false. """ # FIXME(BMo): new name for this function -> save_dict_value ? if pipeline: self._pipeline.hset(key, field, str(value)) else: self._db.hset(key, field, str(value))
python
def set_hash_value(self, key, field, value, pipeline=False): # FIXME(BMo): new name for this function -> save_dict_value ? if pipeline: self._pipeline.hset(key, field, str(value)) else: self._db.hset(key, field, str(value))
[ "def", "set_hash_value", "(", "self", ",", "key", ",", "field", ",", "value", ",", "pipeline", "=", "False", ")", ":", "# FIXME(BMo): new name for this function -> save_dict_value ?", "if", "pipeline", ":", "self", ".", "_pipeline", ".", "hset", "(", "key", ",",...
Set the value of field in a hash stored at key. Args: key (str): key (name) of the hash field (str): Field within the hash to set value: Value to set pipeline (bool): True, start a transaction block. Default false.
[ "Set", "the", "value", "of", "field", "in", "a", "hash", "stored", "at", "key", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py#L264-L278
17,358
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py
ConfigDb.prepend_to_list
def prepend_to_list(self, key, *value, pipeline=False): """Add new element to the start of the list stored at key. Args: key (str): Key where the list is stored value: Value to add to the list pipeline (bool): True, start a transaction block. Default false. """ if pipeline: self._pipeline.lpush(key, *value) else: self._db.lpush(key, *value)
python
def prepend_to_list(self, key, *value, pipeline=False): if pipeline: self._pipeline.lpush(key, *value) else: self._db.lpush(key, *value)
[ "def", "prepend_to_list", "(", "self", ",", "key", ",", "*", "value", ",", "pipeline", "=", "False", ")", ":", "if", "pipeline", ":", "self", ".", "_pipeline", ".", "lpush", "(", "key", ",", "*", "value", ")", "else", ":", "self", ".", "_db", ".", ...
Add new element to the start of the list stored at key. Args: key (str): Key where the list is stored value: Value to add to the list pipeline (bool): True, start a transaction block. Default false.
[ "Add", "new", "element", "to", "the", "start", "of", "the", "list", "stored", "at", "key", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py#L308-L320
17,359
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py
ConfigDb.append_to_list
def append_to_list(self, key, *value, pipeline=False): """Add new element to the end of the list stored at key. Args: key (str): Key where the list is stored value: Value to add to the list pipeline (bool): True, start a transaction block. Default false. """ if pipeline: self._pipeline.rpush(key, *value) else: self._db.rpush(key, *value)
python
def append_to_list(self, key, *value, pipeline=False): if pipeline: self._pipeline.rpush(key, *value) else: self._db.rpush(key, *value)
[ "def", "append_to_list", "(", "self", ",", "key", ",", "*", "value", ",", "pipeline", "=", "False", ")", ":", "if", "pipeline", ":", "self", ".", "_pipeline", ".", "rpush", "(", "key", ",", "*", "value", ")", "else", ":", "self", ".", "_db", ".", ...
Add new element to the end of the list stored at key. Args: key (str): Key where the list is stored value: Value to add to the list pipeline (bool): True, start a transaction block. Default false.
[ "Add", "new", "element", "to", "the", "end", "of", "the", "list", "stored", "at", "key", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py#L323-L335
17,360
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py
ConfigDb.get_list
def get_list(self, key, pipeline=False): """Get all the value in the list stored at key. Args: key (str): Key where the list is stored. pipeline (bool): True, start a transaction block. Default false. Returns: list: values in the list ordered by list index """ if pipeline: return self._pipeline.lrange(key, 0, -1) return self._db.lrange(key, 0, -1)
python
def get_list(self, key, pipeline=False): if pipeline: return self._pipeline.lrange(key, 0, -1) return self._db.lrange(key, 0, -1)
[ "def", "get_list", "(", "self", ",", "key", ",", "pipeline", "=", "False", ")", ":", "if", "pipeline", ":", "return", "self", ".", "_pipeline", ".", "lrange", "(", "key", ",", "0", ",", "-", "1", ")", "return", "self", ".", "_db", ".", "lrange", ...
Get all the value in the list stored at key. Args: key (str): Key where the list is stored. pipeline (bool): True, start a transaction block. Default false. Returns: list: values in the list ordered by list index
[ "Get", "all", "the", "value", "in", "the", "list", "stored", "at", "key", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py#L352-L366
17,361
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py
ConfigDb.delete
def delete(self, *names: str, pipeline=False): """Delete one or more keys specified by names. Args: names (str): Names of keys to delete pipeline (bool): True, start a transaction block. Default false. """ if pipeline: self._pipeline.delete(*names) else: self._db.delete(*names)
python
def delete(self, *names: str, pipeline=False): if pipeline: self._pipeline.delete(*names) else: self._db.delete(*names)
[ "def", "delete", "(", "self", ",", "*", "names", ":", "str", ",", "pipeline", "=", "False", ")", ":", "if", "pipeline", ":", "self", ".", "_pipeline", ".", "delete", "(", "*", "names", ")", "else", ":", "self", ".", "_db", ".", "delete", "(", "*"...
Delete one or more keys specified by names. Args: names (str): Names of keys to delete pipeline (bool): True, start a transaction block. Default false.
[ "Delete", "one", "or", "more", "keys", "specified", "by", "names", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py#L397-L407
17,362
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py
ConfigDb.get_event
def get_event(self, event_name, event_history=None): """Get an event from the database. Gets an event from the named event list removing the event and adding it to the event history. Args: event_name (str): Event list key. event_history (str, optional): Event history list. Returns: str: string representation of the event object """ if event_history is None: event_history = event_name + '_history' return self._db.rpoplpush(event_name, event_history)
python
def get_event(self, event_name, event_history=None): if event_history is None: event_history = event_name + '_history' return self._db.rpoplpush(event_name, event_history)
[ "def", "get_event", "(", "self", ",", "event_name", ",", "event_history", "=", "None", ")", ":", "if", "event_history", "is", "None", ":", "event_history", "=", "event_name", "+", "'_history'", "return", "self", ".", "_db", ".", "rpoplpush", "(", "event_name...
Get an event from the database. Gets an event from the named event list removing the event and adding it to the event history. Args: event_name (str): Event list key. event_history (str, optional): Event history list. Returns: str: string representation of the event object
[ "Get", "an", "event", "from", "the", "database", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py#L437-L453
17,363
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py
ConfigDb.watch
def watch(self, key, pipeline=False): """Watch the given key. Marks the given key to be watch for conditional execution of a transaction. Args: key (str): Key that needs to be watched pipeline (bool): True, start a transaction block. Default false. """ if pipeline: self._pipeline.watch(key) else: self._db.watch(key)
python
def watch(self, key, pipeline=False): if pipeline: self._pipeline.watch(key) else: self._db.watch(key)
[ "def", "watch", "(", "self", ",", "key", ",", "pipeline", "=", "False", ")", ":", "if", "pipeline", ":", "self", ".", "_pipeline", ".", "watch", "(", "key", ")", "else", ":", "self", ".", "_db", ".", "watch", "(", "key", ")" ]
Watch the given key. Marks the given key to be watch for conditional execution of a transaction. Args: key (str): Key that needs to be watched pipeline (bool): True, start a transaction block. Default false.
[ "Watch", "the", "given", "key", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py#L504-L518
17,364
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py
ConfigDb.publish
def publish(self, channel, message, pipeline=False): """Post a message to a given channel. Args: channel (str): Channel where the message will be published message (str): Message to publish pipeline (bool): True, start a transaction block. Default false. """ if pipeline: self._pipeline.publish(channel, message) else: self._db.publish(channel, message)
python
def publish(self, channel, message, pipeline=False): if pipeline: self._pipeline.publish(channel, message) else: self._db.publish(channel, message)
[ "def", "publish", "(", "self", ",", "channel", ",", "message", ",", "pipeline", "=", "False", ")", ":", "if", "pipeline", ":", "self", ".", "_pipeline", ".", "publish", "(", "channel", ",", "message", ")", "else", ":", "self", ".", "_db", ".", "publi...
Post a message to a given channel. Args: channel (str): Channel where the message will be published message (str): Message to publish pipeline (bool): True, start a transaction block. Default false.
[ "Post", "a", "message", "to", "a", "given", "channel", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py#L521-L533
17,365
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/master_controller/master_controller_healthcheck.py
MasterHealthCheck.get_services_health
def get_services_health(self) -> dict: """Get the health of all services. Returns: dict, services id and health status """ # Initialise services_health = {} # Get Service IDs services_ids = self._get_services() for service_id in services_ids: service_name = DC.get_service_name(service_id) # Check if the current and actual replica levels are the same if DC.get_replicas(service_id) != \ DC.get_actual_replica(service_id): services_health[service_name] = "Unhealthy" else: services_health[service_name] = "Healthy" return services_health
python
def get_services_health(self) -> dict: # Initialise services_health = {} # Get Service IDs services_ids = self._get_services() for service_id in services_ids: service_name = DC.get_service_name(service_id) # Check if the current and actual replica levels are the same if DC.get_replicas(service_id) != \ DC.get_actual_replica(service_id): services_health[service_name] = "Unhealthy" else: services_health[service_name] = "Healthy" return services_health
[ "def", "get_services_health", "(", "self", ")", "->", "dict", ":", "# Initialise", "services_health", "=", "{", "}", "# Get Service IDs", "services_ids", "=", "self", ".", "_get_services", "(", ")", "for", "service_id", "in", "services_ids", ":", "service_name", ...
Get the health of all services. Returns: dict, services id and health status
[ "Get", "the", "health", "of", "all", "services", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/master_controller/master_controller_healthcheck.py#L41-L64
17,366
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/master_controller/master_controller_healthcheck.py
MasterHealthCheck.get_overall_services_health
def get_overall_services_health(self) -> str: """Get the overall health of all the services. Returns: str, overall health status """ services_health_status = self.get_services_health() # Evaluate overall health health_status = all(status == "Healthy" for status in services_health_status.values()) # Converting from bool to str if health_status: overall_status = "Healthy" else: overall_status = "Unhealthy" return overall_status
python
def get_overall_services_health(self) -> str: services_health_status = self.get_services_health() # Evaluate overall health health_status = all(status == "Healthy" for status in services_health_status.values()) # Converting from bool to str if health_status: overall_status = "Healthy" else: overall_status = "Unhealthy" return overall_status
[ "def", "get_overall_services_health", "(", "self", ")", "->", "str", ":", "services_health_status", "=", "self", ".", "get_services_health", "(", ")", "# Evaluate overall health", "health_status", "=", "all", "(", "status", "==", "\"Healthy\"", "for", "status", "in"...
Get the overall health of all the services. Returns: str, overall health status
[ "Get", "the", "overall", "health", "of", "all", "the", "services", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/master_controller/master_controller_healthcheck.py#L66-L85
17,367
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/master_controller/master_controller_healthcheck.py
MasterHealthCheck.get_service_health
def get_service_health(service_id: str) -> str: """Get the health of a service using service_id. Args: service_id Returns: str, health status """ # Check if the current and actual replica levels are the same if DC.get_replicas(service_id) != DC.get_actual_replica(service_id): health_status = "Unhealthy" else: health_status = "Healthy" return health_status
python
def get_service_health(service_id: str) -> str: # Check if the current and actual replica levels are the same if DC.get_replicas(service_id) != DC.get_actual_replica(service_id): health_status = "Unhealthy" else: health_status = "Healthy" return health_status
[ "def", "get_service_health", "(", "service_id", ":", "str", ")", "->", "str", ":", "# Check if the current and actual replica levels are the same", "if", "DC", ".", "get_replicas", "(", "service_id", ")", "!=", "DC", ".", "get_actual_replica", "(", "service_id", ")", ...
Get the health of a service using service_id. Args: service_id Returns: str, health status
[ "Get", "the", "health", "of", "a", "service", "using", "service_id", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/master_controller/master_controller_healthcheck.py#L92-L108
17,368
SKA-ScienceDataProcessor/integration-prototype
sip/examples/flask_processing_controller/app/api/health.py
get
def get(): """Check the health of this service""" uptime = time.time() - START_TIME response = dict(uptime=f'{uptime:.2f}s', links=dict(root='{}'.format(get_root_url()))) # TODO(BM) check if we can connect to the config database ... # try: # DB.get_sub_array_ids() # except ConnectionError as error: # response['state'] = 'ERROR' # response['message'] = str(error) return response, HTTPStatus.OK
python
def get(): uptime = time.time() - START_TIME response = dict(uptime=f'{uptime:.2f}s', links=dict(root='{}'.format(get_root_url()))) # TODO(BM) check if we can connect to the config database ... # try: # DB.get_sub_array_ids() # except ConnectionError as error: # response['state'] = 'ERROR' # response['message'] = str(error) return response, HTTPStatus.OK
[ "def", "get", "(", ")", ":", "uptime", "=", "time", ".", "time", "(", ")", "-", "START_TIME", "response", "=", "dict", "(", "uptime", "=", "f'{uptime:.2f}s'", ",", "links", "=", "dict", "(", "root", "=", "'{}'", ".", "format", "(", "get_root_url", "(...
Check the health of this service
[ "Check", "the", "health", "of", "this", "service" ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/examples/flask_processing_controller/app/api/health.py#L17-L29
17,369
SKA-ScienceDataProcessor/integration-prototype
sip/science_pipeline_workflows/ingest_visibilities/recv/async_recv.py
main
def main(): """Main function for SPEAD receiver module.""" # Check command line arguments. if len(sys.argv) < 2: raise RuntimeError('Usage: python3 async_recv.py <json config>') # Set up logging. sip_logging.init_logger(show_thread=True) # Load SPEAD configuration from JSON file. # with open(sys.argv[-1]) as f: # spead_config = json.load(f) spead_config = json.loads(sys.argv[1]) # Set up the SPEAD receiver and run it (see method, above). receiver = SpeadReceiver(spead_config) receiver.run()
python
def main(): # Check command line arguments. if len(sys.argv) < 2: raise RuntimeError('Usage: python3 async_recv.py <json config>') # Set up logging. sip_logging.init_logger(show_thread=True) # Load SPEAD configuration from JSON file. # with open(sys.argv[-1]) as f: # spead_config = json.load(f) spead_config = json.loads(sys.argv[1]) # Set up the SPEAD receiver and run it (see method, above). receiver = SpeadReceiver(spead_config) receiver.run()
[ "def", "main", "(", ")", ":", "# Check command line arguments.", "if", "len", "(", "sys", ".", "argv", ")", "<", "2", ":", "raise", "RuntimeError", "(", "'Usage: python3 async_recv.py <json config>'", ")", "# Set up logging.", "sip_logging", ".", "init_logger", "(",...
Main function for SPEAD receiver module.
[ "Main", "function", "for", "SPEAD", "receiver", "module", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/science_pipeline_workflows/ingest_visibilities/recv/async_recv.py#L171-L187
17,370
SKA-ScienceDataProcessor/integration-prototype
sip/science_pipeline_workflows/ingest_visibilities/recv/async_recv.py
SpeadReceiver.process_buffer
def process_buffer(self, i_block, receive_buffer): """Blocking function to process the received heaps. This is run in an executor. """ self._log.info("Worker thread processing block %i", i_block) time_overall0 = time.time() time_unpack = 0.0 time_write = 0.0 for i_heap, heap in enumerate(receive_buffer.result()): # Skip and log any incomplete heaps. if isinstance(heap, spead2.recv.IncompleteHeap): self._log.info("Dropped incomplete heap %i", heap.cnt + 1) continue # Update the item group from this heap. items = self._item_group.update(heap) # Get the time and channel indices from the heap index. i_chan = i_heap // self._num_buffer_times i_time = i_heap % self._num_buffer_times if 'correlator_output_data' in items: vis_data = items['correlator_output_data'].value['VIS'] if self._block is None: num_baselines = vis_data.shape[0] num_pols = vis_data[0].shape[0] self._block = numpy.zeros((self._num_buffer_times, self._num_streams, num_baselines), dtype=('c8', num_pols)) self._block[:, :, :] = 0 # To make the copies faster. # Unpack data from the heap into the block to be processed. time_unpack0 = time.time() self._block[i_time, i_chan, :] = vis_data time_unpack += time.time() - time_unpack0 # Check the data for debugging! val = self._block[i_time, i_chan, -1][-1].real self._log.debug("Data: %.3f", val) if self._block is not None: # Process the buffered data here. if self._config['process_data']: pass # Write the buffered data to storage. if self._config['write_data']: time_write0 = time.time() with open(self._config['filename'], 'ab') as f: # Don't use pickle, it's really slow (even protocol 4)! numpy.save(f, self._block, allow_pickle=False) time_write += time.time() - time_write0 # Report time taken. 
time_overall = time.time() - time_overall0 self._log.info("Total processing time: %.1f ms", 1000 * time_overall) self._log.info("Unpack was %.1f %%", 100 * time_unpack / time_overall) self._log.info("Write was %.1f %%", 100 * time_write / time_overall) if time_unpack != 0.0: self._log.info("Memory speed %.1f MB/s", (self._block.nbytes * 1e-6) / time_unpack) if time_write != 0.0: self._log.info("Write speed %.1f MB/s", (self._block.nbytes * 1e-6) / time_write)
python
def process_buffer(self, i_block, receive_buffer): self._log.info("Worker thread processing block %i", i_block) time_overall0 = time.time() time_unpack = 0.0 time_write = 0.0 for i_heap, heap in enumerate(receive_buffer.result()): # Skip and log any incomplete heaps. if isinstance(heap, spead2.recv.IncompleteHeap): self._log.info("Dropped incomplete heap %i", heap.cnt + 1) continue # Update the item group from this heap. items = self._item_group.update(heap) # Get the time and channel indices from the heap index. i_chan = i_heap // self._num_buffer_times i_time = i_heap % self._num_buffer_times if 'correlator_output_data' in items: vis_data = items['correlator_output_data'].value['VIS'] if self._block is None: num_baselines = vis_data.shape[0] num_pols = vis_data[0].shape[0] self._block = numpy.zeros((self._num_buffer_times, self._num_streams, num_baselines), dtype=('c8', num_pols)) self._block[:, :, :] = 0 # To make the copies faster. # Unpack data from the heap into the block to be processed. time_unpack0 = time.time() self._block[i_time, i_chan, :] = vis_data time_unpack += time.time() - time_unpack0 # Check the data for debugging! val = self._block[i_time, i_chan, -1][-1].real self._log.debug("Data: %.3f", val) if self._block is not None: # Process the buffered data here. if self._config['process_data']: pass # Write the buffered data to storage. if self._config['write_data']: time_write0 = time.time() with open(self._config['filename'], 'ab') as f: # Don't use pickle, it's really slow (even protocol 4)! numpy.save(f, self._block, allow_pickle=False) time_write += time.time() - time_write0 # Report time taken. 
time_overall = time.time() - time_overall0 self._log.info("Total processing time: %.1f ms", 1000 * time_overall) self._log.info("Unpack was %.1f %%", 100 * time_unpack / time_overall) self._log.info("Write was %.1f %%", 100 * time_write / time_overall) if time_unpack != 0.0: self._log.info("Memory speed %.1f MB/s", (self._block.nbytes * 1e-6) / time_unpack) if time_write != 0.0: self._log.info("Write speed %.1f MB/s", (self._block.nbytes * 1e-6) / time_write)
[ "def", "process_buffer", "(", "self", ",", "i_block", ",", "receive_buffer", ")", ":", "self", ".", "_log", ".", "info", "(", "\"Worker thread processing block %i\"", ",", "i_block", ")", "time_overall0", "=", "time", ".", "time", "(", ")", "time_unpack", "=",...
Blocking function to process the received heaps. This is run in an executor.
[ "Blocking", "function", "to", "process", "the", "received", "heaps", ".", "This", "is", "run", "in", "an", "executor", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/science_pipeline_workflows/ingest_visibilities/recv/async_recv.py#L50-L114
17,371
SKA-ScienceDataProcessor/integration-prototype
sip/science_pipeline_workflows/ingest_visibilities/recv/async_recv.py
SpeadReceiver.run
def run(self): """Starts the receiver.""" executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) loop = asyncio.get_event_loop() loop.run_until_complete(self._run_loop(executor)) self._log.info('Shutting down...') executor.shutdown()
python
def run(self): executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) loop = asyncio.get_event_loop() loop.run_until_complete(self._run_loop(executor)) self._log.info('Shutting down...') executor.shutdown()
[ "def", "run", "(", "self", ")", ":", "executor", "=", "concurrent", ".", "futures", ".", "ThreadPoolExecutor", "(", "max_workers", "=", "1", ")", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "loop", ".", "run_until_complete", "(", "self", ".", ...
Starts the receiver.
[ "Starts", "the", "receiver", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/science_pipeline_workflows/ingest_visibilities/recv/async_recv.py#L161-L167
17,372
SKA-ScienceDataProcessor/integration-prototype
sip/science_pipeline_workflows/receive_pss/pulsar_search.py
PulsarStart.run
def run(self): """Start the FTP Server for pulsar search.""" self._log.info('Starting Pulsar Search Interface') # Instantiate a dummy authorizer for managing 'virtual' users authorizer = DummyAuthorizer() # Define a new user having full r/w permissions and a read-only # anonymous user authorizer.add_user(self._config['login']['user'], self._config['login']['psswd'], '.', perm=self._config['login']['perm']) authorizer.add_anonymous(os.getcwd()) # Instantiate FTP handler class handler = FTPHandler handler.authorizer = authorizer handler.abstracted_fs = PulsarFileSystem # Define a customized banner (string returned when client connects) handler.banner = "SKA SDP pulsar search interface." # Instantiate FTP server class and listen on 0.0.0.0:7878 address = (self._config['address']['listen'], self._config['address']['port']) server = FTPServer(address, handler) # set a limit for connections server.max_cons = 256 server.max_cons_per_ip = 5 # start ftp server server.serve_forever()
python
def run(self): self._log.info('Starting Pulsar Search Interface') # Instantiate a dummy authorizer for managing 'virtual' users authorizer = DummyAuthorizer() # Define a new user having full r/w permissions and a read-only # anonymous user authorizer.add_user(self._config['login']['user'], self._config['login']['psswd'], '.', perm=self._config['login']['perm']) authorizer.add_anonymous(os.getcwd()) # Instantiate FTP handler class handler = FTPHandler handler.authorizer = authorizer handler.abstracted_fs = PulsarFileSystem # Define a customized banner (string returned when client connects) handler.banner = "SKA SDP pulsar search interface." # Instantiate FTP server class and listen on 0.0.0.0:7878 address = (self._config['address']['listen'], self._config['address']['port']) server = FTPServer(address, handler) # set a limit for connections server.max_cons = 256 server.max_cons_per_ip = 5 # start ftp server server.serve_forever()
[ "def", "run", "(", "self", ")", ":", "self", ".", "_log", ".", "info", "(", "'Starting Pulsar Search Interface'", ")", "# Instantiate a dummy authorizer for managing 'virtual' users", "authorizer", "=", "DummyAuthorizer", "(", ")", "# Define a new user having full r/w permiss...
Start the FTP Server for pulsar search.
[ "Start", "the", "FTP", "Server", "for", "pulsar", "search", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/science_pipeline_workflows/receive_pss/pulsar_search.py#L131-L162
17,373
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/scheduling/workflow_stage.py
WorkflowStage.status
def status(self) -> str: """Return the workflow stage status.""" # As status is a modifiable property, have to reload from the db. self._config = self._load_config() return self._config.get('status')
python
def status(self) -> str: # As status is a modifiable property, have to reload from the db. self._config = self._load_config() return self._config.get('status')
[ "def", "status", "(", "self", ")", "->", "str", ":", "# As status is a modifiable property, have to reload from the db.", "self", ".", "_config", "=", "self", ".", "_load_config", "(", ")", "return", "self", ".", "_config", ".", "get", "(", "'status'", ")" ]
Return the workflow stage status.
[ "Return", "the", "workflow", "stage", "status", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/scheduling/workflow_stage.py#L62-L66
17,374
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/scheduling/workflow_stage.py
WorkflowStage.status
def status(self, value): """Set the workflow stage status.""" # FIXME(BM) This is currently a hack because workflow stages # don't each have their own db entry. pb_key = SchedulingObject.get_key(PB_KEY, self._pb_id) stages = DB.get_hash_value(pb_key, 'workflow_stages') stages = ast.literal_eval(stages) stages[self._index]['status'] = value DB.set_hash_value(pb_key, 'workflow_stages', stages)
python
def status(self, value): # FIXME(BM) This is currently a hack because workflow stages # don't each have their own db entry. pb_key = SchedulingObject.get_key(PB_KEY, self._pb_id) stages = DB.get_hash_value(pb_key, 'workflow_stages') stages = ast.literal_eval(stages) stages[self._index]['status'] = value DB.set_hash_value(pb_key, 'workflow_stages', stages)
[ "def", "status", "(", "self", ",", "value", ")", ":", "# FIXME(BM) This is currently a hack because workflow stages", "# don't each have their own db entry.", "pb_key", "=", "SchedulingObject", ".", "get_key", "(", "PB_KEY", ",", "self", ".", "_pb_id", ")", "sta...
Set the workflow stage status.
[ "Set", "the", "workflow", "stage", "status", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/scheduling/workflow_stage.py#L69-L77
17,375
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/scheduling/workflow_stage.py
WorkflowStage._load_config
def _load_config(self): """Load the workflow stage config from the database.""" pb_key = SchedulingObject.get_key(PB_KEY, self._pb_id) stages = DB.get_hash_value(pb_key, 'workflow_stages') stages = ast.literal_eval(stages) return stages[self._index]
python
def _load_config(self): pb_key = SchedulingObject.get_key(PB_KEY, self._pb_id) stages = DB.get_hash_value(pb_key, 'workflow_stages') stages = ast.literal_eval(stages) return stages[self._index]
[ "def", "_load_config", "(", "self", ")", ":", "pb_key", "=", "SchedulingObject", ".", "get_key", "(", "PB_KEY", ",", "self", ".", "_pb_id", ")", "stages", "=", "DB", ".", "get_hash_value", "(", "pb_key", ",", "'workflow_stages'", ")", "stages", "=", "ast",...
Load the workflow stage config from the database.
[ "Load", "the", "workflow", "stage", "config", "from", "the", "database", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/scheduling/workflow_stage.py#L130-L135
17,376
SKA-ScienceDataProcessor/integration-prototype
sip/tango_control/tango_processing_block/app/register_devices.py
register_pb_devices
def register_pb_devices(num_pbs: int = 100): """Register PBs devices. Note(BMo): Ideally we do not want to register any devices here. There does not seem to be a way to create a device server with no registered devices in Tango. This is (probably) because Tango devices must have been registered before the server starts ... """ tango_db = Database() LOG.info("Registering PB devices:") dev_info = DbDevInfo() # pylint: disable=protected-access dev_info._class = 'ProcessingBlockDevice' dev_info.server = 'processing_block_ds/1' for index in range(num_pbs): dev_info.name = 'sip_sdp/pb/{:05d}'.format(index) LOG.info("\t%s", dev_info.name) tango_db.add_device(dev_info)
python
def register_pb_devices(num_pbs: int = 100): tango_db = Database() LOG.info("Registering PB devices:") dev_info = DbDevInfo() # pylint: disable=protected-access dev_info._class = 'ProcessingBlockDevice' dev_info.server = 'processing_block_ds/1' for index in range(num_pbs): dev_info.name = 'sip_sdp/pb/{:05d}'.format(index) LOG.info("\t%s", dev_info.name) tango_db.add_device(dev_info)
[ "def", "register_pb_devices", "(", "num_pbs", ":", "int", "=", "100", ")", ":", "tango_db", "=", "Database", "(", ")", "LOG", ".", "info", "(", "\"Registering PB devices:\"", ")", "dev_info", "=", "DbDevInfo", "(", ")", "# pylint: disable=protected-access", "dev...
Register PBs devices. Note(BMo): Ideally we do not want to register any devices here. There does not seem to be a way to create a device server with no registered devices in Tango. This is (probably) because Tango devices must have been registered before the server starts ...
[ "Register", "PBs", "devices", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/tango_control/tango_processing_block/app/register_devices.py#L19-L37
17,377
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/scheduling/scheduling_block_instance.py
SchedulingBlockInstance.from_config
def from_config(cls, config_dict: dict, schema_path: str = None): """Create an SBI object from the specified configuration dict. NOTE(BM) This should really be done as a single atomic db transaction. Args: config_dict(dict): SBI configuration dictionary schema_path(str, optional): Path to the SBI config schema. """ # Validate the SBI config schema if schema_path is None: schema_path = join(dirname(__file__), 'schema', 'configure_sbi.json') with open(schema_path, 'r') as file: schema = json.loads(file.read()) validate(config_dict, schema) # Add SBI status field config_dict['status'] = 'created' # Set the subarray field to None if not defined. if 'subarray_id' not in config_dict: config_dict['subarray_id'] = 'None' # Add created, and updated timestamps. timestamp = datetime.datetime.utcnow().isoformat() config_dict['created'] = timestamp config_dict['updated'] = timestamp # Split out the processing block data array pb_list = copy.deepcopy(config_dict['processing_blocks']) # Remove processing blocks from the SBI configuration. config_dict.pop('processing_blocks', None) # Add list of PB ids to the SBI configuration config_dict['processing_block_ids'] = [] for pb in pb_list: config_dict['processing_block_ids'].append(pb['id']) # Add the SBI data object to the database. key = SchedulingObject.get_key(SBI_KEY, config_dict['id']) DB.save_dict(key, config_dict, hierarchical=False) # DB.set_hash_values(key, config_dict) # Add the SBI id to the list of active SBIs key = '{}:active'.format(SBI_KEY) DB.append_to_list(key, config_dict['id']) # Publish notification to subscribers sbi = SchedulingObject(SBI_KEY, config_dict['id']) sbi.set_status('created') for pb in pb_list: pb['sbi_id'] = config_dict['id'] cls._add_pb(pb) return cls(config_dict['id'])
python
def from_config(cls, config_dict: dict, schema_path: str = None): # Validate the SBI config schema if schema_path is None: schema_path = join(dirname(__file__), 'schema', 'configure_sbi.json') with open(schema_path, 'r') as file: schema = json.loads(file.read()) validate(config_dict, schema) # Add SBI status field config_dict['status'] = 'created' # Set the subarray field to None if not defined. if 'subarray_id' not in config_dict: config_dict['subarray_id'] = 'None' # Add created, and updated timestamps. timestamp = datetime.datetime.utcnow().isoformat() config_dict['created'] = timestamp config_dict['updated'] = timestamp # Split out the processing block data array pb_list = copy.deepcopy(config_dict['processing_blocks']) # Remove processing blocks from the SBI configuration. config_dict.pop('processing_blocks', None) # Add list of PB ids to the SBI configuration config_dict['processing_block_ids'] = [] for pb in pb_list: config_dict['processing_block_ids'].append(pb['id']) # Add the SBI data object to the database. key = SchedulingObject.get_key(SBI_KEY, config_dict['id']) DB.save_dict(key, config_dict, hierarchical=False) # DB.set_hash_values(key, config_dict) # Add the SBI id to the list of active SBIs key = '{}:active'.format(SBI_KEY) DB.append_to_list(key, config_dict['id']) # Publish notification to subscribers sbi = SchedulingObject(SBI_KEY, config_dict['id']) sbi.set_status('created') for pb in pb_list: pb['sbi_id'] = config_dict['id'] cls._add_pb(pb) return cls(config_dict['id'])
[ "def", "from_config", "(", "cls", ",", "config_dict", ":", "dict", ",", "schema_path", ":", "str", "=", "None", ")", ":", "# Validate the SBI config schema", "if", "schema_path", "is", "None", ":", "schema_path", "=", "join", "(", "dirname", "(", "__file__", ...
Create an SBI object from the specified configuration dict. NOTE(BM) This should really be done as a single atomic db transaction. Args: config_dict(dict): SBI configuration dictionary schema_path(str, optional): Path to the SBI config schema.
[ "Create", "an", "SBI", "object", "from", "the", "specified", "configuration", "dict", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/scheduling/scheduling_block_instance.py#L39-L97
17,378
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/scheduling/scheduling_block_instance.py
SchedulingBlockInstance.get_pb_ids
def get_pb_ids(self) -> List[str]: """Return the list of PB ids associated with the SBI. Returns: list, Processing block ids """ values = DB.get_hash_value(self._key, 'processing_block_ids') return ast.literal_eval(values)
python
def get_pb_ids(self) -> List[str]: values = DB.get_hash_value(self._key, 'processing_block_ids') return ast.literal_eval(values)
[ "def", "get_pb_ids", "(", "self", ")", "->", "List", "[", "str", "]", ":", "values", "=", "DB", ".", "get_hash_value", "(", "self", ".", "_key", ",", "'processing_block_ids'", ")", "return", "ast", ".", "literal_eval", "(", "values", ")" ]
Return the list of PB ids associated with the SBI. Returns: list, Processing block ids
[ "Return", "the", "list", "of", "PB", "ids", "associated", "with", "the", "SBI", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/scheduling/scheduling_block_instance.py#L133-L141
17,379
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/scheduling/scheduling_block_instance.py
SchedulingBlockInstance.get_id
def get_id(date=None, project: str = 'sip', instance_id: int = None) -> str: """Get a SBI Identifier. Args: date (str or datetime.datetime, optional): UTC date of the SBI project (str, optional ): Project Name instance_id (int, optional): SBI instance identifier Returns: str, Scheduling Block Instance (SBI) ID. """ if date is None: date = datetime.datetime.utcnow() if isinstance(date, datetime.datetime): date = date.strftime('%Y%m%d') if instance_id is None: instance_id = randint(0, 9999) return 'SBI-{}-{}-{:04d}'.format(date, project, instance_id)
python
def get_id(date=None, project: str = 'sip', instance_id: int = None) -> str: if date is None: date = datetime.datetime.utcnow() if isinstance(date, datetime.datetime): date = date.strftime('%Y%m%d') if instance_id is None: instance_id = randint(0, 9999) return 'SBI-{}-{}-{:04d}'.format(date, project, instance_id)
[ "def", "get_id", "(", "date", "=", "None", ",", "project", ":", "str", "=", "'sip'", ",", "instance_id", ":", "int", "=", "None", ")", "->", "str", ":", "if", "date", "is", "None", ":", "date", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ...
Get a SBI Identifier. Args: date (str or datetime.datetime, optional): UTC date of the SBI project (str, optional ): Project Name instance_id (int, optional): SBI instance identifier Returns: str, Scheduling Block Instance (SBI) ID.
[ "Get", "a", "SBI", "Identifier", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/scheduling/scheduling_block_instance.py#L144-L166
17,380
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/scheduling/scheduling_block_instance.py
SchedulingBlockInstance._update_workflow_definition
def _update_workflow_definition(pb_config: dict): """Update the PB configuration workflow definition. Args: pb_config (dict): PB configuration dictionary Raises: RunTimeError, if the workflow definition (id, version) specified in the sbi_config is not known. """ known_workflows = get_workflows() workflow_id = pb_config['workflow']['id'] workflow_version = pb_config['workflow']['version'] if workflow_id not in known_workflows or \ workflow_version not in known_workflows[workflow_id]: raise RuntimeError("Unknown workflow definition: {}:{}" .format(workflow_id, workflow_version)) workflow = get_workflow(workflow_id, workflow_version) for stage in workflow['stages']: stage['status'] = 'none' pb_config['workflow_parameters'] = pb_config['workflow']['parameters'] pb_config['workflow_id'] = pb_config['workflow']['id'] pb_config['workflow_version'] = pb_config['workflow']['version'] pb_config['workflow_stages'] = workflow['stages'] pb_config.pop('workflow', None)
python
def _update_workflow_definition(pb_config: dict): known_workflows = get_workflows() workflow_id = pb_config['workflow']['id'] workflow_version = pb_config['workflow']['version'] if workflow_id not in known_workflows or \ workflow_version not in known_workflows[workflow_id]: raise RuntimeError("Unknown workflow definition: {}:{}" .format(workflow_id, workflow_version)) workflow = get_workflow(workflow_id, workflow_version) for stage in workflow['stages']: stage['status'] = 'none' pb_config['workflow_parameters'] = pb_config['workflow']['parameters'] pb_config['workflow_id'] = pb_config['workflow']['id'] pb_config['workflow_version'] = pb_config['workflow']['version'] pb_config['workflow_stages'] = workflow['stages'] pb_config.pop('workflow', None)
[ "def", "_update_workflow_definition", "(", "pb_config", ":", "dict", ")", ":", "known_workflows", "=", "get_workflows", "(", ")", "workflow_id", "=", "pb_config", "[", "'workflow'", "]", "[", "'id'", "]", "workflow_version", "=", "pb_config", "[", "'workflow'", ...
Update the PB configuration workflow definition. Args: pb_config (dict): PB configuration dictionary Raises: RunTimeError, if the workflow definition (id, version) specified in the sbi_config is not known.
[ "Update", "the", "PB", "configuration", "workflow", "definition", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/scheduling/scheduling_block_instance.py#L211-L236
17,381
SKA-ScienceDataProcessor/integration-prototype
sip/examples/flask_processing_controller/app/api/home.py
root
def root(): """Placeholder root url for the PCI. Ideally this should never be called! """ response = { "links": { "message": "Welcome to the SIP Processing Controller Interface", "items": [ {"href": "{}health".format(request.url)}, {"href": "{}subarrays".format(request.url)}, {"href": "{}scheduling_blocks".format(request.url)}, {"href": "{}processing_blocks".format(request.url)} ] } } return response, HTTPStatus.OK
python
def root(): response = { "links": { "message": "Welcome to the SIP Processing Controller Interface", "items": [ {"href": "{}health".format(request.url)}, {"href": "{}subarrays".format(request.url)}, {"href": "{}scheduling_blocks".format(request.url)}, {"href": "{}processing_blocks".format(request.url)} ] } } return response, HTTPStatus.OK
[ "def", "root", "(", ")", ":", "response", "=", "{", "\"links\"", ":", "{", "\"message\"", ":", "\"Welcome to the SIP Processing Controller Interface\"", ",", "\"items\"", ":", "[", "{", "\"href\"", ":", "\"{}health\"", ".", "format", "(", "request", ".", "url", ...
Placeholder root url for the PCI. Ideally this should never be called!
[ "Placeholder", "root", "url", "for", "the", "PCI", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/examples/flask_processing_controller/app/api/home.py#L11-L27
17,382
SKA-ScienceDataProcessor/integration-prototype
sip/examples/flask_processing_controller/app/old.db/mock/init.py
generate_scheduling_block_id
def generate_scheduling_block_id(num_blocks, project='test'): """Generate a scheduling_block id""" _date = strftime("%Y%m%d", gmtime()) _project = project for i in range(num_blocks): yield '{}-{}-sbi{:03d}'.format(_date, _project, i)
python
def generate_scheduling_block_id(num_blocks, project='test'): _date = strftime("%Y%m%d", gmtime()) _project = project for i in range(num_blocks): yield '{}-{}-sbi{:03d}'.format(_date, _project, i)
[ "def", "generate_scheduling_block_id", "(", "num_blocks", ",", "project", "=", "'test'", ")", ":", "_date", "=", "strftime", "(", "\"%Y%m%d\"", ",", "gmtime", "(", ")", ")", "_project", "=", "project", "for", "i", "in", "range", "(", "num_blocks", ")", ":"...
Generate a scheduling_block id
[ "Generate", "a", "scheduling_block", "id" ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/examples/flask_processing_controller/app/old.db/mock/init.py#L18-L23
17,383
SKA-ScienceDataProcessor/integration-prototype
sip/examples/flask_processing_controller/app/api/utils.py
add_scheduling_block
def add_scheduling_block(config): """Adds a scheduling block to the database, returning a response object""" try: DB.add_sbi(config) except jsonschema.ValidationError as error: error_dict = error.__dict__ for key in error_dict: error_dict[key] = error_dict[key].__str__() error_response = dict(message="Failed to add scheduling block", reason="JSON validation error", details=error_dict) return error_response, HTTPStatus.BAD_REQUEST response = dict(config=config, message='Successfully registered scheduling block ' 'instance with ID: {}'.format(config['id'])) response['links'] = { 'self': '{}scheduling-block/{}'.format(request.url_root, config['id']), 'list': '{}'.format(request.url), 'home': '{}'.format(request.url_root) } return response, HTTPStatus.ACCEPTED
python
def add_scheduling_block(config): try: DB.add_sbi(config) except jsonschema.ValidationError as error: error_dict = error.__dict__ for key in error_dict: error_dict[key] = error_dict[key].__str__() error_response = dict(message="Failed to add scheduling block", reason="JSON validation error", details=error_dict) return error_response, HTTPStatus.BAD_REQUEST response = dict(config=config, message='Successfully registered scheduling block ' 'instance with ID: {}'.format(config['id'])) response['links'] = { 'self': '{}scheduling-block/{}'.format(request.url_root, config['id']), 'list': '{}'.format(request.url), 'home': '{}'.format(request.url_root) } return response, HTTPStatus.ACCEPTED
[ "def", "add_scheduling_block", "(", "config", ")", ":", "try", ":", "DB", ".", "add_sbi", "(", "config", ")", "except", "jsonschema", ".", "ValidationError", "as", "error", ":", "error_dict", "=", "error", ".", "__dict__", "for", "key", "in", "error_dict", ...
Adds a scheduling block to the database, returning a response object
[ "Adds", "a", "scheduling", "block", "to", "the", "database", "returning", "a", "response", "object" ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/examples/flask_processing_controller/app/api/utils.py#L24-L45
17,384
SKA-ScienceDataProcessor/integration-prototype
sip/examples/flask_processing_controller/app/api/utils.py
missing_db_response
def missing_db_response(func): """Decorator to check connection exceptions""" @wraps(func) def with_exception_handling(*args, **kwargs): """Wrapper to check for connection failures""" try: return func(*args, **kwargs) except ConnectionError as error: return (dict(error='Unable to connect to Configuration Db.', error_message=str(error), links=dict(root='{}'.format(get_root_url()))), HTTPStatus.NOT_FOUND) return with_exception_handling
python
def missing_db_response(func): @wraps(func) def with_exception_handling(*args, **kwargs): """Wrapper to check for connection failures""" try: return func(*args, **kwargs) except ConnectionError as error: return (dict(error='Unable to connect to Configuration Db.', error_message=str(error), links=dict(root='{}'.format(get_root_url()))), HTTPStatus.NOT_FOUND) return with_exception_handling
[ "def", "missing_db_response", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "with_exception_handling", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Wrapper to check for connection failures\"\"\"", "try", ":", "return", "func", "(",...
Decorator to check connection exceptions
[ "Decorator", "to", "check", "connection", "exceptions" ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/examples/flask_processing_controller/app/api/utils.py#L48-L60
17,385
SKA-ScienceDataProcessor/integration-prototype
sip/science_pipeline_workflows/mock_workflow/mock_workflow_stage/task.py
main
def main(): """Run the workflow task.""" log = logging.getLogger('sip.mock_workflow_stage') if len(sys.argv) != 2: log.critical('Expecting JSON string as first argument!') return config = json.loads(sys.argv[1]) log.info('Running mock_workflow_stage (version: %s).', __version__) log.info('Received configuration: %s', json.dumps(config)) log.info('Starting task') i = 0 start_time = time.time() duration = config.get('duration', 20) while time.time() - start_time <= duration: time.sleep(duration / 20) elapsed = time.time() - start_time log.info(" %s %2i / 20 (elapsed %.2f s)", config.get('message', 'Progress '), i + 1, elapsed) i += 1 log.info('Task complete!')
python
def main(): log = logging.getLogger('sip.mock_workflow_stage') if len(sys.argv) != 2: log.critical('Expecting JSON string as first argument!') return config = json.loads(sys.argv[1]) log.info('Running mock_workflow_stage (version: %s).', __version__) log.info('Received configuration: %s', json.dumps(config)) log.info('Starting task') i = 0 start_time = time.time() duration = config.get('duration', 20) while time.time() - start_time <= duration: time.sleep(duration / 20) elapsed = time.time() - start_time log.info(" %s %2i / 20 (elapsed %.2f s)", config.get('message', 'Progress '), i + 1, elapsed) i += 1 log.info('Task complete!')
[ "def", "main", "(", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "'sip.mock_workflow_stage'", ")", "if", "len", "(", "sys", ".", "argv", ")", "!=", "2", ":", "log", ".", "critical", "(", "'Expecting JSON string as first argument!'", ")", "return",...
Run the workflow task.
[ "Run", "the", "workflow", "task", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/science_pipeline_workflows/mock_workflow/mock_workflow_stage/task.py#L12-L36
17,386
SKA-ScienceDataProcessor/integration-prototype
sip/examples/log_spammer/log_spammer.py
main
def main(sleep_length=0.1): """Log to stdout using python logging in a while loop""" log = logging.getLogger('sip.examples.log_spammer') log.info('Starting to spam log messages every %fs', sleep_length) counter = 0 try: while True: log.info('Hello %06i (log_spammer: %s, sip logging: %s)', counter, _version.__version__, __version__) counter += 1 time.sleep(sleep_length) except KeyboardInterrupt: log.info('Exiting...')
python
def main(sleep_length=0.1): log = logging.getLogger('sip.examples.log_spammer') log.info('Starting to spam log messages every %fs', sleep_length) counter = 0 try: while True: log.info('Hello %06i (log_spammer: %s, sip logging: %s)', counter, _version.__version__, __version__) counter += 1 time.sleep(sleep_length) except KeyboardInterrupt: log.info('Exiting...')
[ "def", "main", "(", "sleep_length", "=", "0.1", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "'sip.examples.log_spammer'", ")", "log", ".", "info", "(", "'Starting to spam log messages every %fs'", ",", "sleep_length", ")", "counter", "=", "0", "try",...
Log to stdout using python logging in a while loop
[ "Log", "to", "stdout", "using", "python", "logging", "in", "a", "while", "loop" ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/examples/log_spammer/log_spammer.py#L11-L24
17,387
SKA-ScienceDataProcessor/integration-prototype
sip/science_pipeline_workflows/ical_dask/pipelines/imaging_processing.py
init_logging
def init_logging(): """Initialise Python logging.""" fmt = '%(asctime)s.%(msecs)03d | %(name)-60s | %(levelname)-7s ' \ '| %(message)s' logging.basicConfig(format=fmt, datefmt='%H:%M:%S', level=logging.DEBUG)
python
def init_logging(): fmt = '%(asctime)s.%(msecs)03d | %(name)-60s | %(levelname)-7s ' \ '| %(message)s' logging.basicConfig(format=fmt, datefmt='%H:%M:%S', level=logging.DEBUG)
[ "def", "init_logging", "(", ")", ":", "fmt", "=", "'%(asctime)s.%(msecs)03d | %(name)-60s | %(levelname)-7s '", "'| %(message)s'", "logging", ".", "basicConfig", "(", "format", "=", "fmt", ",", "datefmt", "=", "'%H:%M:%S'", ",", "level", "=", "logging", ".", "DEBUG"...
Initialise Python logging.
[ "Initialise", "Python", "logging", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/science_pipeline_workflows/ical_dask/pipelines/imaging_processing.py#L38-L42
17,388
SKA-ScienceDataProcessor/integration-prototype
sip/examples/flask_processing_controller/app/api/subarray.py
get
def get(sub_array_id): """Sub array detail resource. This method will list scheduling blocks and processing blocks in the specified sub-array. """ if not re.match(r'^subarray-0[0-9]|subarray-1[0-5]$', sub_array_id): response = dict(error='Invalid sub-array ID specified "{}" does not ' 'match sub-array ID naming convention ' '(ie. subarray-[00-15]).'. format(sub_array_id)) return response, HTTPStatus.BAD_REQUEST if sub_array_id not in DB.get_sub_array_ids(): response = dict(error='Sub-array "{}" does not currently exist. ' 'Known sub-arrays = {}' .format(sub_array_id, DB.get_sub_array_ids())) return response, HTTPStatus.NOT_FOUND block_ids = DB.get_sub_array_sbi_ids(sub_array_id) _blocks = [b for b in DB.get_block_details(block_ids)] response = dict(scheduling_blocks=[]) _url = get_root_url() for block in _blocks: block['links'] = { 'self': '{}/scheduling-block/{}'.format(_url, block['id']) } response['scheduling_blocks'].append(block) response['links'] = { 'self': '{}'.format(request.url), 'list': '{}/sub-arrays'.format(_url), 'home': '{}'.format(_url), } return response, HTTPStatus.OK
python
def get(sub_array_id): if not re.match(r'^subarray-0[0-9]|subarray-1[0-5]$', sub_array_id): response = dict(error='Invalid sub-array ID specified "{}" does not ' 'match sub-array ID naming convention ' '(ie. subarray-[00-15]).'. format(sub_array_id)) return response, HTTPStatus.BAD_REQUEST if sub_array_id not in DB.get_sub_array_ids(): response = dict(error='Sub-array "{}" does not currently exist. ' 'Known sub-arrays = {}' .format(sub_array_id, DB.get_sub_array_ids())) return response, HTTPStatus.NOT_FOUND block_ids = DB.get_sub_array_sbi_ids(sub_array_id) _blocks = [b for b in DB.get_block_details(block_ids)] response = dict(scheduling_blocks=[]) _url = get_root_url() for block in _blocks: block['links'] = { 'self': '{}/scheduling-block/{}'.format(_url, block['id']) } response['scheduling_blocks'].append(block) response['links'] = { 'self': '{}'.format(request.url), 'list': '{}/sub-arrays'.format(_url), 'home': '{}'.format(_url), } return response, HTTPStatus.OK
[ "def", "get", "(", "sub_array_id", ")", ":", "if", "not", "re", ".", "match", "(", "r'^subarray-0[0-9]|subarray-1[0-5]$'", ",", "sub_array_id", ")", ":", "response", "=", "dict", "(", "error", "=", "'Invalid sub-array ID specified \"{}\" does not '", "'match sub-array...
Sub array detail resource. This method will list scheduling blocks and processing blocks in the specified sub-array.
[ "Sub", "array", "detail", "resource", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/examples/flask_processing_controller/app/api/subarray.py#L20-L52
17,389
SKA-ScienceDataProcessor/integration-prototype
sip/examples/flask_processing_controller/app/api/subarray.py
get_scheduling_block
def get_scheduling_block(sub_array_id, block_id): """Return the list of scheduling blocks instances associated with the sub array""" block_ids = DB.get_sub_array_sbi_ids(sub_array_id) if block_id in block_ids: block = DB.get_block_details([block_id]).__next__() return block, HTTPStatus.OK return dict(error="unknown id"), HTTPStatus.NOT_FOUND
python
def get_scheduling_block(sub_array_id, block_id): block_ids = DB.get_sub_array_sbi_ids(sub_array_id) if block_id in block_ids: block = DB.get_block_details([block_id]).__next__() return block, HTTPStatus.OK return dict(error="unknown id"), HTTPStatus.NOT_FOUND
[ "def", "get_scheduling_block", "(", "sub_array_id", ",", "block_id", ")", ":", "block_ids", "=", "DB", ".", "get_sub_array_sbi_ids", "(", "sub_array_id", ")", "if", "block_id", "in", "block_ids", ":", "block", "=", "DB", ".", "get_block_details", "(", "[", "bl...
Return the list of scheduling blocks instances associated with the sub array
[ "Return", "the", "list", "of", "scheduling", "blocks", "instances", "associated", "with", "the", "sub", "array" ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/examples/flask_processing_controller/app/api/subarray.py#L76-L84
17,390
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/setup.py
package_files
def package_files(directory): """Get list of data files to add to the package.""" paths = [] for (path, _, file_names) in walk(directory): for filename in file_names: paths.append(join('..', path, filename)) return paths
python
def package_files(directory): paths = [] for (path, _, file_names) in walk(directory): for filename in file_names: paths.append(join('..', path, filename)) return paths
[ "def", "package_files", "(", "directory", ")", ":", "paths", "=", "[", "]", "for", "(", "path", ",", "_", ",", "file_names", ")", "in", "walk", "(", "directory", ")", ":", "for", "filename", "in", "file_names", ":", "paths", ".", "append", "(", "join...
Get list of data files to add to the package.
[ "Get", "list", "of", "data", "files", "to", "add", "to", "the", "package", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/setup.py#L9-L15
17,391
SKA-ScienceDataProcessor/integration-prototype
sip/tango_control/tango_master/app/sdp_master_ds.py
register_master
def register_master(): """Register the SDP Master device.""" tango_db = Database() device = "sip_sdp/elt/master" device_info = DbDevInfo() device_info._class = "SDPMasterDevice" device_info.server = "sdp_master_ds/1" device_info.name = device devices = tango_db.get_device_name(device_info.server, device_info._class) if device not in devices: LOG.info('Registering device "%s" with device server "%s"', device_info.name, device_info.server) tango_db.add_device(device_info)
python
def register_master(): tango_db = Database() device = "sip_sdp/elt/master" device_info = DbDevInfo() device_info._class = "SDPMasterDevice" device_info.server = "sdp_master_ds/1" device_info.name = device devices = tango_db.get_device_name(device_info.server, device_info._class) if device not in devices: LOG.info('Registering device "%s" with device server "%s"', device_info.name, device_info.server) tango_db.add_device(device_info)
[ "def", "register_master", "(", ")", ":", "tango_db", "=", "Database", "(", ")", "device", "=", "\"sip_sdp/elt/master\"", "device_info", "=", "DbDevInfo", "(", ")", "device_info", ".", "_class", "=", "\"SDPMasterDevice\"", "device_info", ".", "server", "=", "\"sd...
Register the SDP Master device.
[ "Register", "the", "SDP", "Master", "device", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/tango_control/tango_master/app/sdp_master_ds.py#L21-L33
17,392
SKA-ScienceDataProcessor/integration-prototype
sip/tango_control/tango_master/app/sdp_master_ds.py
main
def main(args=None, **kwargs): """Run the Tango SDP Master device server.""" LOG.info('Starting %s', __service_id__) return run([SDPMasterDevice], verbose=True, msg_stream=sys.stdout, args=args, **kwargs)
python
def main(args=None, **kwargs): LOG.info('Starting %s', __service_id__) return run([SDPMasterDevice], verbose=True, msg_stream=sys.stdout, args=args, **kwargs)
[ "def", "main", "(", "args", "=", "None", ",", "*", "*", "kwargs", ")", ":", "LOG", ".", "info", "(", "'Starting %s'", ",", "__service_id__", ")", "return", "run", "(", "[", "SDPMasterDevice", "]", ",", "verbose", "=", "True", ",", "msg_stream", "=", ...
Run the Tango SDP Master device server.
[ "Run", "the", "Tango", "SDP", "Master", "device", "server", "." ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/tango_control/tango_master/app/sdp_master_ds.py#L36-L40
17,393
SKA-ScienceDataProcessor/integration-prototype
sip/science_pipeline_workflows/receive_pss/csp_pss_sender/app/__main__.py
main
def main(): """Main script function""" # Create simulation object, and start streaming SPEAD heaps sender = PulsarSender() # Parse command line arguments args = parse_command_line() # Initialise logging. _log = _init_log(level=logging.DEBUG if args.verbose else logging.INFO) # Load configuration. _log.info('Loading config: %s', args.config_file.name) _config = json.load(args.config_file) if args.print_settings: _log.debug('Settings:\n %s', json.dumps(_config, indent=4, sort_keys=True)) sender.send(_config, _log, 1, 1)
python
def main(): # Create simulation object, and start streaming SPEAD heaps sender = PulsarSender() # Parse command line arguments args = parse_command_line() # Initialise logging. _log = _init_log(level=logging.DEBUG if args.verbose else logging.INFO) # Load configuration. _log.info('Loading config: %s', args.config_file.name) _config = json.load(args.config_file) if args.print_settings: _log.debug('Settings:\n %s', json.dumps(_config, indent=4, sort_keys=True)) sender.send(_config, _log, 1, 1)
[ "def", "main", "(", ")", ":", "# Create simulation object, and start streaming SPEAD heaps", "sender", "=", "PulsarSender", "(", ")", "# Parse command line arguments", "args", "=", "parse_command_line", "(", ")", "# Initialise logging.", "_log", "=", "_init_log", "(", "le...
Main script function
[ "Main", "script", "function" ]
8c8006de6ad71dcd44114b0338780738079c87d4
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/science_pipeline_workflows/receive_pss/csp_pss_sender/app/__main__.py#L46-L63
17,394
DiamondLightSource/python-workflows
workflows/services/common_service.py
CommonService.start_transport
def start_transport(self): """If a transport object has been defined then connect it now.""" if self.transport: if self.transport.connect(): self.log.debug("Service successfully connected to transport layer") else: raise RuntimeError("Service could not connect to transport layer") # direct all transport callbacks into the main queue self._transport_interceptor_counter = itertools.count() self.transport.subscription_callback_set_intercept( self._transport_interceptor ) else: self.log.debug("No transport layer defined for service. Skipping.")
python
def start_transport(self): if self.transport: if self.transport.connect(): self.log.debug("Service successfully connected to transport layer") else: raise RuntimeError("Service could not connect to transport layer") # direct all transport callbacks into the main queue self._transport_interceptor_counter = itertools.count() self.transport.subscription_callback_set_intercept( self._transport_interceptor ) else: self.log.debug("No transport layer defined for service. Skipping.")
[ "def", "start_transport", "(", "self", ")", ":", "if", "self", ".", "transport", ":", "if", "self", ".", "transport", ".", "connect", "(", ")", ":", "self", ".", "log", ".", "debug", "(", "\"Service successfully connected to transport layer\"", ")", "else", ...
If a transport object has been defined then connect it now.
[ "If", "a", "transport", "object", "has", "been", "defined", "then", "connect", "it", "now", "." ]
7ef47b457655b96f4d2ef7ee9863cf1b6d20e023
https://github.com/DiamondLightSource/python-workflows/blob/7ef47b457655b96f4d2ef7ee9863cf1b6d20e023/workflows/services/common_service.py#L170-L183
17,395
DiamondLightSource/python-workflows
workflows/services/common_service.py
CommonService._transport_interceptor
def _transport_interceptor(self, callback): """Takes a callback function and returns a function that takes headers and messages and places them on the main service queue.""" def add_item_to_queue(header, message): queue_item = ( Priority.TRANSPORT, next( self._transport_interceptor_counter ), # insertion sequence to keep messages in order (callback, header, message), ) self.__queue.put( queue_item ) # Block incoming transport until insertion completes return add_item_to_queue
python
def _transport_interceptor(self, callback): def add_item_to_queue(header, message): queue_item = ( Priority.TRANSPORT, next( self._transport_interceptor_counter ), # insertion sequence to keep messages in order (callback, header, message), ) self.__queue.put( queue_item ) # Block incoming transport until insertion completes return add_item_to_queue
[ "def", "_transport_interceptor", "(", "self", ",", "callback", ")", ":", "def", "add_item_to_queue", "(", "header", ",", "message", ")", ":", "queue_item", "=", "(", "Priority", ".", "TRANSPORT", ",", "next", "(", "self", ".", "_transport_interceptor_counter", ...
Takes a callback function and returns a function that takes headers and messages and places them on the main service queue.
[ "Takes", "a", "callback", "function", "and", "returns", "a", "function", "that", "takes", "headers", "and", "messages", "and", "places", "them", "on", "the", "main", "service", "queue", "." ]
7ef47b457655b96f4d2ef7ee9863cf1b6d20e023
https://github.com/DiamondLightSource/python-workflows/blob/7ef47b457655b96f4d2ef7ee9863cf1b6d20e023/workflows/services/common_service.py#L185-L201
17,396
DiamondLightSource/python-workflows
workflows/services/common_service.py
CommonService.extend_log
def extend_log(self, field, value): """A context wherein a specified extra field in log messages is populated with a fixed value. This affects all log messages within the context.""" self.__log_extensions.append((field, value)) try: yield except Exception as e: setattr(e, "workflows_log_" + field, value) raise finally: self.__log_extensions.remove((field, value))
python
def extend_log(self, field, value): self.__log_extensions.append((field, value)) try: yield except Exception as e: setattr(e, "workflows_log_" + field, value) raise finally: self.__log_extensions.remove((field, value))
[ "def", "extend_log", "(", "self", ",", "field", ",", "value", ")", ":", "self", ".", "__log_extensions", ".", "append", "(", "(", "field", ",", "value", ")", ")", "try", ":", "yield", "except", "Exception", "as", "e", ":", "setattr", "(", "e", ",", ...
A context wherein a specified extra field in log messages is populated with a fixed value. This affects all log messages within the context.
[ "A", "context", "wherein", "a", "specified", "extra", "field", "in", "log", "messages", "is", "populated", "with", "a", "fixed", "value", ".", "This", "affects", "all", "log", "messages", "within", "the", "context", "." ]
7ef47b457655b96f4d2ef7ee9863cf1b6d20e023
https://github.com/DiamondLightSource/python-workflows/blob/7ef47b457655b96f4d2ef7ee9863cf1b6d20e023/workflows/services/common_service.py#L216-L226
17,397
DiamondLightSource/python-workflows
workflows/services/common_service.py
CommonService._log_send
def _log_send(self, logrecord): """Forward log records to the frontend.""" for field, value in self.__log_extensions: setattr(logrecord, field, value) self.__send_to_frontend({"band": "log", "payload": logrecord})
python
def _log_send(self, logrecord): for field, value in self.__log_extensions: setattr(logrecord, field, value) self.__send_to_frontend({"band": "log", "payload": logrecord})
[ "def", "_log_send", "(", "self", ",", "logrecord", ")", ":", "for", "field", ",", "value", "in", "self", ".", "__log_extensions", ":", "setattr", "(", "logrecord", ",", "field", ",", "value", ")", "self", ".", "__send_to_frontend", "(", "{", "\"band\"", ...
Forward log records to the frontend.
[ "Forward", "log", "records", "to", "the", "frontend", "." ]
7ef47b457655b96f4d2ef7ee9863cf1b6d20e023
https://github.com/DiamondLightSource/python-workflows/blob/7ef47b457655b96f4d2ef7ee9863cf1b6d20e023/workflows/services/common_service.py#L280-L284
17,398
DiamondLightSource/python-workflows
workflows/services/common_service.py
CommonService.__update_service_status
def __update_service_status(self, statuscode): """Set the internal status of the service object, and notify frontend.""" if self.__service_status != statuscode: self.__service_status = statuscode self.__send_service_status_to_frontend()
python
def __update_service_status(self, statuscode): if self.__service_status != statuscode: self.__service_status = statuscode self.__send_service_status_to_frontend()
[ "def", "__update_service_status", "(", "self", ",", "statuscode", ")", ":", "if", "self", ".", "__service_status", "!=", "statuscode", ":", "self", ".", "__service_status", "=", "statuscode", "self", ".", "__send_service_status_to_frontend", "(", ")" ]
Set the internal status of the service object, and notify frontend.
[ "Set", "the", "internal", "status", "of", "the", "service", "object", "and", "notify", "frontend", "." ]
7ef47b457655b96f4d2ef7ee9863cf1b6d20e023
https://github.com/DiamondLightSource/python-workflows/blob/7ef47b457655b96f4d2ef7ee9863cf1b6d20e023/workflows/services/common_service.py#L296-L300
17,399
DiamondLightSource/python-workflows
workflows/services/common_service.py
CommonService._set_name
def _set_name(self, name): """Set a new name for this service, and notify the frontend accordingly.""" self._service_name = name self.__send_to_frontend({"band": "set_name", "name": self._service_name})
python
def _set_name(self, name): self._service_name = name self.__send_to_frontend({"band": "set_name", "name": self._service_name})
[ "def", "_set_name", "(", "self", ",", "name", ")", ":", "self", ".", "_service_name", "=", "name", "self", ".", "__send_to_frontend", "(", "{", "\"band\"", ":", "\"set_name\"", ",", "\"name\"", ":", "self", ".", "_service_name", "}", ")" ]
Set a new name for this service, and notify the frontend accordingly.
[ "Set", "a", "new", "name", "for", "this", "service", "and", "notify", "the", "frontend", "accordingly", "." ]
7ef47b457655b96f4d2ef7ee9863cf1b6d20e023
https://github.com/DiamondLightSource/python-workflows/blob/7ef47b457655b96f4d2ef7ee9863cf1b6d20e023/workflows/services/common_service.py#L312-L315